hexsha
stringlengths
40
40
size
int64
7
1.04M
ext
stringclasses
10 values
lang
stringclasses
1 value
max_stars_repo_path
stringlengths
4
247
max_stars_repo_name
stringlengths
4
125
max_stars_repo_head_hexsha
stringlengths
40
78
max_stars_repo_licenses
listlengths
1
10
max_stars_count
int64
1
368k
max_stars_repo_stars_event_min_datetime
stringlengths
24
24
max_stars_repo_stars_event_max_datetime
stringlengths
24
24
max_issues_repo_path
stringlengths
4
247
max_issues_repo_name
stringlengths
4
125
max_issues_repo_head_hexsha
stringlengths
40
78
max_issues_repo_licenses
listlengths
1
10
max_issues_count
int64
1
116k
max_issues_repo_issues_event_min_datetime
stringlengths
24
24
max_issues_repo_issues_event_max_datetime
stringlengths
24
24
max_forks_repo_path
stringlengths
4
247
max_forks_repo_name
stringlengths
4
125
max_forks_repo_head_hexsha
stringlengths
40
78
max_forks_repo_licenses
listlengths
1
10
max_forks_count
int64
1
105k
max_forks_repo_forks_event_min_datetime
stringlengths
24
24
max_forks_repo_forks_event_max_datetime
stringlengths
24
24
content
stringlengths
1
1.04M
avg_line_length
float64
1.77
618k
max_line_length
int64
1
1.02M
alphanum_fraction
float64
0
1
original_content
stringlengths
7
1.04M
filtered:remove_function_no_docstring
int64
-102
942k
filtered:remove_class_no_docstring
int64
-354
977k
filtered:remove_delete_markers
int64
0
60.1k
542ed650cd51d1be301e72556d0e3aecf6a5572b
2,711
py
Python
tests/test_wsgisubdomain.py
xsleonard/wsgisubdomain
394c705a9e39cf4e4e1af2551ed0ed79d2c070f3
[ "MIT" ]
2
2016-06-15T09:31:20.000Z
2016-06-20T11:43:01.000Z
tests/test_wsgisubdomain.py
xsleonard/wsgisubdomain
394c705a9e39cf4e4e1af2551ed0ed79d2c070f3
[ "MIT" ]
null
null
null
tests/test_wsgisubdomain.py
xsleonard/wsgisubdomain
394c705a9e39cf4e4e1af2551ed0ed79d2c070f3
[ "MIT" ]
null
null
null
from unittest import TestCase from wsgisubdomain import SubdomainDispatcher
37.136986
79
0.620435
from unittest import TestCase from wsgisubdomain import SubdomainDispatcher class TestSubdomainDispatcher(TestCase): def assertIs(self, a, b): try: super(TestSubdomainDispatcher, self).assertIs(a, b) except AttributeError: # python 2.6 support self.assertTrue(a is b) @staticmethod def create_app(subdomain=None): return lambda x, y: subdomain def test_create(self): s = SubdomainDispatcher(1) self.assertEqual(s.create_application, 1) self.assertTrue(hasattr(s, 'lock')) self.assertFalse(s.instances) def test_get_host(self): environ = dict(HTTP_HOST='xxx.example.com', SERVER_NAME='example.com') self.assertEqual('xxx.example.com', SubdomainDispatcher._get_host(environ)) del environ['HTTP_HOST'] self.assertEqual('example.com', SubdomainDispatcher._get_host(environ)) def test_extract_subdomain(self): # Test with ip address, returns None s = SubdomainDispatcher h = '127.0.0.1' self.assertIs(s._extract_subdomain(h), None) self.assertIs(s._extract_subdomain(h + ':888'), None) # Test with no subdomain h = 'example.com' self.assertEqual(s._extract_subdomain(h), '') self.assertEqual(s._extract_subdomain(h + ':888'), '') # Test with a single subdomain h = 'www.example.com' self.assertEqual(s._extract_subdomain(h), 'www') self.assertEqual(s._extract_subdomain(h + ':888'), 'www') # Test with multiple subdomains h = 'a.b.c.d.example.com' self.assertEqual(s._extract_subdomain(h), 'a.b.c.d') self.assertEqual(s._extract_subdomain(h + ':888'), 'a.b.c.d') def test_get_application(self): s = SubdomainDispatcher(self.create_app) environ = dict(HTTP_HOST='xxx.example.com', SERVER_NAME='example.com') app = s.get_application(environ) self.assertEqual(app(0, 0), 'xxx') environ['HTTP_HOST'] = 'example.com' app = s.get_application(environ) self.assertEqual(app(0, 0), '') environ['HTTP_HOST'] = '127.0.0.1' app = s.get_application(environ) self.assertIs(app(0, 0), None) def test_call(self): s = SubdomainDispatcher(self.create_app) environ = dict(HTTP_HOST='xxx.example.com', 
SERVER_NAME='example.com') app = s(environ, None) self.assertEqual(app, 'xxx') environ['HTTP_HOST'] = 'example.com' app = s(environ, None) self.assertEqual(app, '') environ['HTTP_HOST'] = '127.0.0.1' app = s(environ, None) self.assertIs(app, None)
2,385
226
23
5575aa3a08f72b6233406337d9b98d6034623d01
26,482
py
Python
tests/unit_tests/test_set_dropin.py
realead/cykhash
b1a45843c3be49cd232d3c78315d2291a830284f
[ "MIT" ]
18
2019-03-13T08:20:06.000Z
2021-06-22T08:03:01.000Z
tests/unit_tests/test_set_dropin.py
realead/cykhash
b1a45843c3be49cd232d3c78315d2291a830284f
[ "MIT" ]
6
2020-04-13T10:11:45.000Z
2021-11-14T15:59:55.000Z
tests/unit_tests/test_set_dropin.py
realead/cykhash
b1a45843c3be49cd232d3c78315d2291a830284f
[ "MIT" ]
7
2019-05-19T22:24:57.000Z
2020-08-26T23:01:23.000Z
import pytest from unittestmock import UnitTestMock from cykhash import Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet import cykhash SUFFIX={Int64Set : "int64", Int32Set : "int32", Float64Set : "float64", Float32Set : "float32", PyObjectSet : "pyobject"} @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] )
32.099394
85
0.601088
import pytest from unittestmock import UnitTestMock from cykhash import Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet import cykhash SUFFIX={Int64Set : "int64", Int32Set : "int32", Float64Set : "float64", Float32Set : "float32", PyObjectSet : "pyobject"} def pick_fun(name, set_type): return getattr(cykhash, name+"_"+SUFFIX[set_type]) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestSetDropIn(UnitTestMock): def test_init_from_iter(self, set_type): s=set_type([1,2,3,1]) self.assertEqual(len(s), 3) self.assertTrue(1 in s) self.assertTrue(2 in s) self.assertTrue(3 in s) def test_clear(self, set_type): s=set_type([1,2,3,1]) s.clear() self.assertEqual(len(s), 0) s.add(5) s.update([3,4,5,6]) self.assertEqual(s, set_type([3,4,5,6])) s.clear() self.assertEqual(len(s), 0) def test_str(self, set_type): s=set_type([1,2,3,1]) ss = str(s) self.assertTrue("1" in ss) self.assertTrue("2" in ss) self.assertTrue("3" in ss) self.assertTrue(ss.startswith("{")) self.assertTrue(ss.endswith("}")) def test_remove_yes(self, set_type): s=set_type([1,2]) s.remove(1) self.assertEqual(s,set_type([2])) s.remove(2) self.assertEqual(s,set_type([])) def test_remove_no(self, set_type): s=set_type([1,2]) with pytest.raises(KeyError) as context: s.remove(3) self.assertEqual(3, context.value.args[0]) def test_pop_one(self, set_type): s=set_type([1]) el=s.pop() self.assertEqual(s,set_type([])) self.assertEqual(el,1) def test_pop_all(self, set_type): s=set_type([1,2,3]) new_s={s.pop(), s.pop(), s.pop()} self.assertEqual(s,set_type([])) self.assertEqual(new_s,{1,2,3}) def test_pop_empty(self, set_type): s=set_type([]) with pytest.raises(KeyError) as context: s.pop() self.assertEqual("pop from empty set", context.value.args[0]) def test_pyobject_same_object_pop(): a=float("3333.2") s=PyObjectSet([a]) b=s.pop() assert a is b @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class 
TestIsDisjoint(UnitTestMock): def test_aredisjoint_with_none(self, set_type): s=set_type([1,2,3,1]) fun=pick_fun("aredisjoint", set_type) with pytest.raises(TypeError) as context: fun(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: fun(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: fun(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_aredisjoint_with_empty(self, set_type): empty1=set_type() empty2=set_type() non_empty=set_type(range(3)) aredisjoint=pick_fun("aredisjoint", set_type) self.assertEqual(aredisjoint(empty1, non_empty), True) self.assertEqual(aredisjoint(non_empty, empty2), True) self.assertEqual(aredisjoint(empty1, empty2), True) def test_aredisjoint_yes(self, set_type): a=set_type([1,2,3,1]) b=set_type([4,55]) fun=pick_fun("aredisjoint", set_type) self.assertEqual(fun(a,b), True) self.assertEqual(fun(b,a), True) def test_aredisjoint_no(self, set_type): a=set_type([1,2,3,333,1]) b=set_type([4,55,4,5,6,7,333]) fun=pick_fun("aredisjoint", set_type) self.assertEqual(fun(a,b), False) self.assertEqual(fun(b,a), False) def test_isdisjoint_yes_set(self, set_type): a=set_type([1,2,3,1]) b=set_type([4,55]) self.assertEqual(a.isdisjoint(b), True) self.assertEqual(b.isdisjoint(a), True) def test_isdisjoint_no_set(self, set_type): a=set_type([1,2,3,333,1]) b=set_type([4,55,4,5,6,7,333]) self.assertEqual(a.isdisjoint(b), False) self.assertEqual(b.isdisjoint(a), False) def test_isdisjoint_yes_iter(self, set_type): a=set_type([1,2,3,1]) b=[4,55] self.assertEqual(a.isdisjoint(b), True) def test_isdisjoint_no_iter(self, set_type): a=set_type([1,2,3,333,1]) b=[4,55,4,5,6,7,333] self.assertEqual(a.isdisjoint(b), False) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestIsSubsetIsSuperset(UnitTestMock): def 
test_with_none(self, set_type): s=set_type([1,2,3,1]) fun=pick_fun("issubset", set_type) with pytest.raises(TypeError) as context: fun(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: fun(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: fun(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_with_empty(self, set_type): a=set_type([1,2,3,1]) b=set_type([]) fun=pick_fun("issubset", set_type) self.assertEqual(fun(a,a), True) self.assertEqual(fun(a,b), True) self.assertEqual(fun(b,a), False) self.assertEqual(fun(b,b), True) def test_yes(self, set_type): a=set_type([1,2,3,1]) b=set_type([1,3]) fun=pick_fun("issubset", set_type) self.assertEqual(fun(a,b), True) self.assertEqual(fun(b,a), False) def test_no(self, set_type): a=set_type([1,2,3,1]) b=set_type([4]) fun=pick_fun("issubset", set_type) self.assertEqual(fun(a,b), False) self.assertEqual(fun(b,a), False) def test_issuperset_yes(self, set_type): a=set_type([1,2,3,1]) b=set_type([1,3]) self.assertEqual(a.issuperset(b), True) self.assertEqual(b.issuperset(a), False) def test_issuperset_no(self, set_type): a=set_type([1,2,3,1]) b=set_type([4]) self.assertEqual(a.issuperset(b), False) self.assertEqual(b.issuperset(a), False) def test_issuperset_yes_iter(self, set_type): a=set_type([1,2,3,1]) b=[1,3] self.assertEqual(a.issuperset(b), True) def test_issuperset_no_iter(self, set_type): a=set_type([1,2,3,1]) b=[4] self.assertEqual(a.issuperset(b), False) def test_issubset_yes_iter(self, set_type): a=set_type([1,2]) b=[1,3,2] self.assertEqual(a.issubset(b), True) def test_issubset_no_iter(self, set_type): a=set_type([1,2]) b=[1,1,3] self.assertEqual(a.issubset(b), False) def test_issubset_yes(self, set_type): a=set_type([1,2]) b=set_type([1,3,2]) self.assertEqual(a.issubset(b), True) self.assertEqual(b.issubset(a), False) 
def test_issubset_no(self, set_type): a=set_type([1,2]) b=set_type([1,1,3]) self.assertEqual(a.issubset(b), False) self.assertEqual(b.issubset(a), False) def test_compare_self(self, set_type): a=set_type([1,2]) self.assertEqual(a<=a, True) self.assertEqual(a>=a, True) self.assertEqual(a<a, False) self.assertEqual(a>a, False) def test_compare_no_relation(self, set_type): a=set_type([1,2]) b=set_type([1,3]) self.assertEqual(a<=b, False) self.assertEqual(a>=b, False) self.assertEqual(a<b, False) self.assertEqual(a>b, False) def test_compare_real_subset(self, set_type): a=set_type([1,2,3]) b=set_type([1,3]) self.assertEqual(a<=b, False) self.assertEqual(a>=b, True) self.assertEqual(a<b, False) self.assertEqual(a>b, True) def test_compare_same(self, set_type): a=set_type([1,3]) b=set_type([1,3]) self.assertEqual(a<=b, True) self.assertEqual(a>=b, True) self.assertEqual(a<b, False) self.assertEqual(a>b, False) def test_compare_equal_yes(self, set_type): a=set_type([2,5,7,8,1,3]) b=set_type([1,3,7,7,7,7,7,2,5,8,8,8,8,8,8]) self.assertEqual(a==b, True) self.assertEqual(a==b, True) def test_compare_equal_yes(self, set_type): a=set_type([2,5,7,8,1,3]) b=set_type([3,7,7,7,7,7,2,5,8,8,8,8,8,8]) self.assertEqual(a==b, False) self.assertEqual(a==b, False) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestCopy(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) copy=pick_fun("copy", set_type) self.assertTrue(copy(None) is None) def test_with_empty(self, set_type): a=set_type([]) copy=pick_fun("copy", set_type) self.assertEqual(len(copy(a)), 0) def test_small(self, set_type): a=set_type([1,2,3,1]) copy=pick_fun("copy", set_type) self.assertEqual(copy(a)==a, True) def test_large(self, set_type): a=set_type(range(33,10000,3)) copy=pick_fun("copy", set_type) self.assertEqual(copy(a)==a, True) def test_large_method(self, set_type): a=set_type(range(33,10000,3)) self.assertEqual(a.copy()==a, True) 
@pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestUpdate(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) update=pick_fun("update", set_type) with pytest.raises(TypeError) as context: update(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: update(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: update(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_some_common(self, set_type): a=set_type([1,2,3,4]) b=set_type([2,1,2,5]) c=b.copy() update=pick_fun("update", set_type) update(a,b) self.assertEqual(a, set_type([1,2,3,4,5])) self.assertEqual(b, c) def test_with_itself(self, set_type): a=set_type([1,2,3,1]) b=a.copy() update=pick_fun("update", set_type) update(a,a) self.assertEqual(a, b) def test_with_disjunct(self, set_type): a=set_type(range(50)) b=set_type(range(50,100)) update=pick_fun("update", set_type) update(a,b) self.assertEqual(a, set_type(range(100))) def test_method_with_set(self, set_type): a=set_type(range(50)) b=set_type(range(100)) a.update(b) self.assertEqual(a, set_type(range(100))) def test_method_with_set(self, set_type): a=set_type(range(50)) b=set_type(range(100)) a.update(b) self.assertEqual(a, set_type(range(100))) def test_method_with_iterator(self, set_type): a=set_type(range(50)) a.update(range(60)) self.assertEqual(a, set_type(range(60))) def test_ior(self, set_type): a=set_type(range(50)) a|=set_type(range(60)) self.assertEqual(a, set_type(range(60))) def test_union(self, set_type): a=set_type(range(30)) a_copy = a.copy() b=a.union(range(30,40), set_type(range(40,50)), range(50,60)) self.assertEqual(b, set_type(range(60))) self.assertEqual(a, a_copy) def test_union_empty(self, set_type): a=set_type(range(30)) a.union() self.assertEqual(a, 
set_type(range(30))) def test_or(self, set_type): a=set_type(range(30)) b=set_type(range(30,40)) c=set_type(range(40,50)) d=a|b|c self.assertEqual(d, set_type(range(50))) self.assertEqual(a, set_type(range(30))) self.assertEqual(b, set_type(range(30,40))) self.assertEqual(c, set_type(range(40,50))) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestSwap(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) swap=pick_fun("swap", set_type) with pytest.raises(TypeError) as context: swap(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: swap(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: swap(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_some_common(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() b_copy=b.copy() swap=pick_fun("swap", set_type) swap(a,b) self.assertEqual(a, b_copy) self.assertEqual(b, a_copy) swap(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestIntersect(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) intersect=pick_fun("intersect", set_type) with pytest.raises(TypeError) as context: intersect(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: intersect(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: intersect(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_small(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() b_copy=b.copy() intersect=pick_fun("intersect", set_type) 
c=intersect(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([2,4])) c=intersect(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([2,4])) def test_disjunct(self, set_type): a=set_type([1,3,5,7,9]) b=set_type([2,2,4,6,8,10]) a_copy=a.copy() b_copy=b.copy() intersect=pick_fun("intersect", set_type) c=intersect(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type()) c=intersect(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([])) def test_empty(self, set_type): a=set_type([]) b=set_type([]) c=set_type([2,2,4,6,8,10]) intersect=pick_fun("intersect", set_type) d=intersect(a,b) self.assertEqual(len(d), 0) d=intersect(c,b) self.assertEqual(len(d), 0) d=intersect(a,c) self.assertEqual(len(d), 0) def test_intersection_update(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) b=set_type([2,4,6,8,10,12]) b_copy = b.copy() a.intersection_update(b) self.assertEqual(a, set_type([2,4,6,8])) self.assertEqual(b, b_copy) def test_intersection_update_iter(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) a.intersection_update([2,4,6,8,10,12]) self.assertEqual(a, set_type([2,4,6,8])) def test_empty_update(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) b=set_type([]) a.intersection_update(b) self.assertEqual(len(a), 0) def test_empty_update_iter(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) a.intersection_update([]) self.assertEqual(a, set_type()) def test_iadd(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) b=set_type([1,104,3]) a&=b self.assertEqual(a, set_type([1,3])) def test_add(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) b=set_type([1,104,3]) a_copy=a.copy() b_copy=b.copy() c=a&b self.assertEqual(c, set_type([1,3])) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) def test_intersection(self, set_type): a=set_type([1,2,3,4,5,6,7,8]) a_copy=a.copy() c=a.intersection([1,2,3,4,5,6], set_type([1,2,3,4,5]), [1,2,3]) 
self.assertEqual(c, set_type([1,2,3])) self.assertEqual(a, a_copy) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestDifference(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) difference=pick_fun("difference", set_type) with pytest.raises(TypeError) as context: difference(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: difference(s,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: difference(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_small(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() b_copy=b.copy() difference=pick_fun("difference", set_type) c=difference(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([1,3])) c=difference(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([5])) def test_disjunct(self, set_type): a=set_type([1,3,5,7,9]) b=set_type([2,2,4,6,8,10]) a_copy=a.copy() b_copy=b.copy() difference=pick_fun("difference", set_type) c=difference(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, a) c=difference(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, b) def test_empty(self, set_type): a=set_type([]) b=set_type([]) c=set_type([2,2,4,6,8,10]) difference=pick_fun("difference", set_type) d=difference(a,b) self.assertEqual(len(d), 0) d=difference(c,b) self.assertEqual(c, d) d=difference(a,c) self.assertEqual(len(d), 0) def test_method_update(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) b_copy=b.copy() a.difference_update(b) self.assertEqual(b, b_copy) self.assertEqual(a, set_type([1,3])) def test_method_update2(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() 
b.difference_update(a) self.assertEqual(a, a_copy) self.assertEqual(b, set_type([5])) def test_method_update_from_iter(self, set_type): a=set_type([1,2,3,4]) a.difference_update([5,2,4]) self.assertEqual(a, set_type([1,3])) def test_method_update_from_iter2(self, set_type): a=set_type(range(1000)) a.difference_update(range(0,1000,2)) self.assertEqual(a, set_type(range(1,1000,2))) def test_method_update_from_iter3(self, set_type): a=set_type([1,2]) a.difference_update([1]*10000) self.assertEqual(a, set_type([2])) def test_sub(self, set_type): a=set_type([0,222,3,444,5]) b=set_type([222,3,4]) a_copy=a.copy() b_copy=b.copy() c=a-b self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([0,444,5])) c=b-a self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([4])) def test_sub2(self, set_type): a=set_type([1,2,3,4]) a_copy=a.copy() b=a-a-a-a self.assertEqual(a, a_copy) self.assertEqual(b, set_type()) def test_isub(self, set_type): a=set_type([0,222,3,444,5]) b=set_type([222,3,4]) b_copy=b.copy() a-=b self.assertEqual(b, b_copy) self.assertEqual(a, set_type([0,444,5])) def test_isub2(self, set_type): a=set_type([1,2,3,4]) a-=a self.assertEqual(a, set_type()) def test_difference_method(self, set_type): a=set_type(range(10000)) a_copy=a.copy() b=a.difference(range(5000), set_type(range(5000,10000,2)), range(1,9999,2)) self.assertEqual(b, set_type([9999])) self.assertEqual(a, a_copy) @pytest.mark.parametrize( "set_type", [Int64Set, Int32Set, Float64Set, Float32Set, PyObjectSet] ) class TestSymmetricDifference(UnitTestMock): def test_with_none(self, set_type): s=set_type([1,2,3,1]) symmetric_difference=pick_fun("symmetric_difference", set_type) with pytest.raises(TypeError) as context: symmetric_difference(None,s) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) with pytest.raises(TypeError) as context: symmetric_difference(s,None) self.assertTrue("'NoneType' object is not iterable" in 
context.value.args[0]) with pytest.raises(TypeError) as context: symmetric_difference(None,None) self.assertTrue("'NoneType' object is not iterable" in context.value.args[0]) def test_small(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() b_copy=b.copy() symmetric_difference=pick_fun("symmetric_difference", set_type) c=symmetric_difference(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([1,3,5])) c=symmetric_difference(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([1,3,5])) def test_disjunct(self, set_type): a=set_type([1,3,5,7,9]) b=set_type([2,2,4,6,8,10]) a_copy=a.copy() b_copy=b.copy() symmetric_difference=pick_fun("symmetric_difference", set_type) c=symmetric_difference(a,b) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, a|b) c=symmetric_difference(b,a) self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, a|b) def test_empty(self, set_type): a=set_type([]) b=set_type([]) c=set_type([2,2,4,6,8,10]) symmetric_difference=pick_fun("symmetric_difference", set_type) d=symmetric_difference(a,b) self.assertEqual(len(d), 0) d=symmetric_difference(c,b) self.assertEqual(c, d) d=symmetric_difference(a,c) self.assertEqual(c, d) def test_method_update(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) b_copy=b.copy() a.symmetric_difference_update(b) self.assertEqual(b, b_copy) self.assertEqual(a, set_type([1,3,5])) def test_method_update2(self, set_type): a=set_type([1,2,3,4]) b=set_type([5,2,4]) a_copy=a.copy() b.symmetric_difference_update(a) self.assertEqual(a, a_copy) self.assertEqual(b, set_type([1,3,5])) def test_method_update_from_iter(self, set_type): a=set_type([1,2,3,4]) a.symmetric_difference_update([5,2,4]) self.assertEqual(a, set_type([1,3, 5])) def test_method_update_from_iter2(self, set_type): a=set_type(range(1000)) a.symmetric_difference_update(range(0,1000,2)) self.assertEqual(a, 
set_type(range(1,1000,2))) def test_method_update_from_iter3(self, set_type): a=set_type([1,2]) a.symmetric_difference_update([1]*10000) self.assertEqual(a, set_type([2])) def test_method_update_from_iter4(self, set_type): a=set_type([1,2]) a.symmetric_difference_update(a) self.assertEqual(len(a), 0) def test_xor(self, set_type): a=set_type([0,222,3,444,5]) b=set_type([222,3,4]) a_copy=a.copy() b_copy=b.copy() c=a^b self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([0,444,5,4])) c=b^a self.assertEqual(a, a_copy) self.assertEqual(b, b_copy) self.assertEqual(c, set_type([0,444,5,4])) def test_xor2(self, set_type): a=set_type([1,2,3,4]) a_copy=a.copy() b=a^a^a^a self.assertEqual(a, a_copy) self.assertEqual(len(b), 0) def test_xor3(self, set_type): a=set_type([1,2,3,4]) a_copy=a.copy() b=a^a^a^a^a self.assertEqual(a, a_copy) self.assertEqual(b, a) def test_ixor(self, set_type): a=set_type([0,222,3,444,5]) b=set_type([222,3,4]) b_copy=b.copy() a^=b self.assertEqual(b, b_copy) self.assertEqual(a, set_type([0,444,5,4])) def test_ixor2(self, set_type): a=set_type([1,2,3,4]) a^=a self.assertEqual(a, set_type()) def test_symmetric_method(self, set_type): a=set_type(range(10)) a_copy=a.copy() b=a.symmetric_difference(range(5,15), set_type(range(5,10)), range(1,16)) self.assertEqual(b, set_type([0,15])) self.assertEqual(a, a_copy)
22,305
125
2,763
a1fba859af22a9a1b8fc1410806c26c08f872a7f
6,478
py
Python
lib/airflow/tests/contrib/jobs/test_dag_trigger.py
ideax-business/ai-flow
0a7797a093beddf827f68cad7a11e0babf1b5059
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
lib/airflow/tests/contrib/jobs/test_dag_trigger.py
ideax-business/ai-flow
0a7797a093beddf827f68cad7a11e0babf1b5059
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
lib/airflow/tests/contrib/jobs/test_dag_trigger.py
ideax-business/ai-flow
0a7797a093beddf827f68cad7a11e0babf1b5059
[ "Apache-2.0", "BSD-2-Clause", "MIT", "ECL-2.0", "BSD-3-Clause" ]
null
null
null
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import multiprocessing import os import time import unittest import pendulum.datetime from notification_service.base_notification import BaseEvent from notification_service.client import NotificationClient from notification_service.event_storage import MemoryEventStorage from notification_service.server import NotificationServer from notification_service.service import NotificationService from airflow.models.serialized_dag import SerializedDagModel from airflow.contrib.jobs.dag_trigger import DagTrigger from airflow.contrib.jobs.scheduler_client import EventSchedulerClient, ResponseWatcher from airflow.models import DagModel from airflow.utils.mailbox import Mailbox from airflow.utils.session import create_session from airflow.events.scheduler_events import SchedulerInnerEventUtil, SchedulerInnerEventType, SCHEDULER_NAMESPACE from tests.test_utils import db from tests.test_utils.config import conf_vars DAG_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'dags') TEST_DAG_FILE = os.path.join(DAG_FOLDER, 'test_scheduler_dags.py') if __name__ == '__main__': unittest.main()
42.064935
113
0.706082
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import multiprocessing import os import time import unittest import pendulum.datetime from notification_service.base_notification import BaseEvent from notification_service.client import NotificationClient from notification_service.event_storage import MemoryEventStorage from notification_service.server import NotificationServer from notification_service.service import NotificationService from airflow.models.serialized_dag import SerializedDagModel from airflow.contrib.jobs.dag_trigger import DagTrigger from airflow.contrib.jobs.scheduler_client import EventSchedulerClient, ResponseWatcher from airflow.models import DagModel from airflow.utils.mailbox import Mailbox from airflow.utils.session import create_session from airflow.events.scheduler_events import SchedulerInnerEventUtil, SchedulerInnerEventType, SCHEDULER_NAMESPACE from tests.test_utils import db from tests.test_utils.config import conf_vars DAG_FOLDER = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(__file__))), 'dags') TEST_DAG_FILE = os.path.join(DAG_FOLDER, 'test_scheduler_dags.py') class TestDagTrigger(unittest.TestCase): def setUp(self) -> None: db.clear_db_dags() db.clear_db_serialized_dags() def 
test_dag_trigger_is_alive(self): mailbox = Mailbox() dag_trigger = DagTrigger(".", -1, [], False, mailbox) assert not dag_trigger.is_alive() dag_trigger.start() time.sleep(1) assert dag_trigger.is_alive() dag_trigger.end() assert not dag_trigger.is_alive() def test_dag_trigger(self): mailbox = Mailbox() dag_trigger = DagTrigger(".", -1, [], False, mailbox) dag_trigger.start() type(self)._add_dag_needing_dagrun() message = mailbox.get_message() message = SchedulerInnerEventUtil.to_inner_event(message) assert message.dag_id == "test" dag_trigger.end() def test_dag_trigger_parse_dag(self): mailbox = Mailbox() dag_trigger = DagTrigger(TEST_DAG_FILE, -1, [], False, mailbox) dag_trigger.start() message = mailbox.get_message() message = SchedulerInnerEventUtil.to_inner_event(message) # only one dag is executable assert "test_task_start_date_scheduling" == message.dag_id assert DagModel.get_dagmodel(dag_id="test_task_start_date_scheduling") is not None assert DagModel.get_dagmodel(dag_id="test_start_date_scheduling") is not None assert SerializedDagModel.get(dag_id="test_task_start_date_scheduling") is not None assert SerializedDagModel.get(dag_id="test_start_date_scheduling") is not None dag_trigger.end() def test_user_trigger_parse_dag(self): port = 50101 notification_server_uri = 'localhost:{}'.format(port) storage = MemoryEventStorage() server = NotificationServer(NotificationService(storage), port) server.run() mailbox = Mailbox() dag_trigger = DagTrigger(TEST_DAG_FILE, -1, [], False, mailbox, 5, notification_server_uri) dag_trigger.start() message = mailbox.get_message() message = SchedulerInnerEventUtil.to_inner_event(message) # only one dag is executable assert "test_task_start_date_scheduling" == message.dag_id sc = EventSchedulerClient(notification_server_uri=notification_server_uri, namespace='a') sc.trigger_parse_dag(TEST_DAG_FILE) dag_trigger.end() server.stop() def test_file_processor_manager_kill(self): mailbox = Mailbox() dag_trigger = DagTrigger(".", -1, 
[], False, mailbox) dag_trigger.start() dag_file_processor_manager_process = dag_trigger._dag_file_processor_agent._process dag_file_processor_manager_process.kill() dag_file_processor_manager_process.join(1) assert not dag_file_processor_manager_process.is_alive() cnt = 0 while cnt < 100 and not dag_file_processor_manager_process.is_alive(): dag_file_processor_manager_process = dag_trigger._dag_file_processor_agent._process if not dag_file_processor_manager_process.is_alive(): time.sleep(0.1) cnt = cnt + 1 continue else: break assert dag_file_processor_manager_process.is_alive() dag_trigger.end() def _send_request_and_receive_response(self, notification_server_uri, file_path): key = '{}_{}'.format(file_path, time.time_ns()) client = NotificationClient(server_uri=notification_server_uri, default_namespace=SCHEDULER_NAMESPACE) event = BaseEvent(key=key, event_type=SchedulerInnerEventType.PARSE_DAG_REQUEST.value, value=file_path) watcher: ResponseWatcher = ResponseWatcher() client.start_listen_event(key=key, event_type=SchedulerInnerEventType.PARSE_DAG_RESPONSE.value, watcher=watcher) client.send_event(event) res: BaseEvent = watcher.get_result() self.assertEquals(event.key, res.key) self.assertEquals(event.value, file_path) client.stop_listen_events() @staticmethod def _add_dag_needing_dagrun(): with create_session() as session: orm_dag = DagModel(dag_id="test") orm_dag.is_paused = False orm_dag.is_active = True orm_dag.next_dagrun_create_after = pendulum.now() session.merge(orm_dag) session.commit() if __name__ == '__main__': unittest.main()
4,286
253
23
db967d18b0150b9b46f807e4175b670ec1cd835a
1,063
py
Python
tests/test_model_registry.py
murthyn/composer
2a04cf387dd8558556500f7ef2bc6d3d131043d5
[ "Apache-2.0" ]
null
null
null
tests/test_model_registry.py
murthyn/composer
2a04cf387dd8558556500f7ef2bc6d3d131043d5
[ "Apache-2.0" ]
null
null
null
tests/test_model_registry.py
murthyn/composer
2a04cf387dd8558556500f7ef2bc6d3d131043d5
[ "Apache-2.0" ]
null
null
null
# Copyright 2021 MosaicML. All Rights Reserved. import pytest from composer.models import ModelHparams from composer.trainer.trainer_hparams import model_registry @pytest.mark.parametrize("model_name", model_registry.keys())
26.575
61
0.686736
# Copyright 2021 MosaicML. All Rights Reserved. import pytest from composer.models import ModelHparams from composer.trainer.trainer_hparams import model_registry @pytest.mark.parametrize("model_name", model_registry.keys()) def test_model_registry(model_name, request): if model_name in ['timm']: pytest.importorskip("timm") if model_name in ['unet']: pytest.importorskip("monai") # create the model hparams object model_hparams = model_registry[model_name]() requires_num_classes = set([ "deeplabv3", "resnet9_cifar10", "resnet56_cifar10", "efficientnetb0", "resnet", "mnist_classifier", ]) if model_name in requires_num_classes: model_hparams.num_classes = 10 if model_name == "resnet": model_hparams.model_name = 'resnet50' if model_name == "deeplabv3": model_hparams.is_backbone_pretrained = False if model_name == "timm": model_hparams.model_name = "resnet18" assert isinstance(model_hparams, ModelHparams)
812
0
22
f0d8e904d213c2024920577bec459677043c4496
1,490
py
Python
hardhat/recipes/poppler.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/poppler.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
hardhat/recipes/poppler.py
stangelandcl/hardhat
1ad0c5dec16728c0243023acb9594f435ef18f9c
[ "MIT" ]
null
null
null
from .base import GnuRecipe
32.391304
75
0.502013
from .base import GnuRecipe class Extra: pass class PopplerRecipe(GnuRecipe): def __init__(self, *args, **kwargs): super(PopplerRecipe, self).__init__(*args, **kwargs) self.sha256 = '420abaab63caed9e1ee28964a0ba216d' \ '1979506726164bc99ad5ade289192a1b' self.name = 'poppler' self.version = '0.41.0' self.depends = ['cairo', 'curl', 'fontconfig', 'gtk2', 'libjpeg-turbo', 'libpng', 'libtiff', 'qt5', 'pkgconfig'] self.url = 'http://poppler.freedesktop.org/poppler-$version.tar.xz' e = Extra() e.name = 'poppler-data' e.version = '0.4.7' e.url = 'http://poppler.freedesktop.org/' \ 'poppler-data-%s.tar.gz' % e.version e.sha256 = 'e752b0d88a7aba54574152143e7bf764' \ '36a7ef51977c55d6bd9a48dccde3a7de' self.extra_downloads = [e] self.configure_args += [ '--disable-static', '--enable-cmyk', '--enable-build-type=release', '--enable-xpdf-headers', '--with-testdatadir=$PWD/testfiles' ] def extract(self): super(PopplerRecipe, self).extract() for e in self.extra_downloads: self.extract_into(e.filename, self.extract_dir)
1,351
10
99
6d2e3d350112bf5ba43f5eeb7294864da2a45653
1,290
py
Python
src/lab2rgb.py
ytyaru/Python.ColorSpace.Converter.20210606081641
7cd88aa12eb23ac26785ad1bff3a3e8034947c65
[ "CC0-1.0" ]
null
null
null
src/lab2rgb.py
ytyaru/Python.ColorSpace.Converter.20210606081641
7cd88aa12eb23ac26785ad1bff3a3e8034947c65
[ "CC0-1.0" ]
null
null
null
src/lab2rgb.py
ytyaru/Python.ColorSpace.Converter.20210606081641
7cd88aa12eb23ac26785ad1bff3a3e8034947c65
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 # coding: utf8 from labconstants import LabConstants if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='色空間を変換する。Lab->RGB', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('lab', help="色。カンマ区切りで渡す。\n 0.0 <= l <= 100.0\n -86.18463 <= a <= 98.25422\n -107.863686 <= b <= 94.48248") args = parser.parse_args() lab = args.lab.split(',') lab[0] = float(lab[0]) lab[1] = float(lab[1]) lab[2] = float(lab[2]) r, g, b = lab2rgb(lab) print('%d,%d,%d' % (r, g, b))
33.076923
134
0.589147
#!/usr/bin/env python3 # coding: utf8 from labconstants import LabConstants def lab2rgb(lab): import math l, a, b = lab y = (l + 16) / 116 x = y if math.isnan(a) else y + a / 500 z = y if math.isnan(b) else y - b / 200 y = LabConstants.Yn * lab_xyz(y) x = LabConstants.Xn * lab_xyz(x) z = LabConstants.Zn * lab_xyz(z) r = xyz_rgb(3.2404542 * x - 1.5371385 * y - 0.4985314 * z) # D65 -> sRGB g = xyz_rgb(-0.9692660 * x + 1.8760108 * y + 0.0415560 * z) b_ = xyz_rgb(0.0556434 * x - 0.2040259 * y + 1.0572252 * z) return (r, g, b_) def xyz_rgb(r): return 255 * (12.92 * r if r <= 0.00304 else 1.055 * pow(r, 1 / 2.4) - 0.055) def lab_xyz(t): return t * t * t if t > LabConstants.t1 else LabConstants.t2 * (t - LabConstants.t0) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='色空間を変換する。Lab->RGB', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('lab', help="色。カンマ区切りで渡す。\n 0.0 <= l <= 100.0\n -86.18463 <= a <= 98.25422\n -107.863686 <= b <= 94.48248") args = parser.parse_args() lab = args.lab.split(',') lab[0] = float(lab[0]) lab[1] = float(lab[1]) lab[2] = float(lab[2]) r, g, b = lab2rgb(lab) print('%d,%d,%d' % (r, g, b))
642
0
68
0300076d66cbcef4d8deb5912fdfdb704586b6be
23,969
py
Python
app/model.py
Princessgladys/googleresourcefinder
7715276b3c588f7c457de04944559052c8170f7e
[ "Apache-2.0" ]
null
null
null
app/model.py
Princessgladys/googleresourcefinder
7715276b3c588f7c457de04944559052c8170f7e
[ "Apache-2.0" ]
null
null
null
app/model.py
Princessgladys/googleresourcefinder
7715276b3c588f7c457de04944559052c8170f7e
[ "Apache-2.0" ]
null
null
null
# Copyright 2009-2010 by Ka-Ping Yee # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The Resource Mapper data model. All entities and fields are add-only and are never deleted or overwritten. To represent modifications to a country's data, create a new Version under the appropriate Country. To represent modifications to a subject's attributes, add a new Report for that Subject. This project uses the following naming conventions in variables and properties: "key": An App Engine entity key. "name": A unique identifier for an entity, used as the entity's key_name. Fields ending in "_name" contain the key_names of other entities. Names should be C-style identifiers (thus usable as HTML element IDs, form element names, and CSS classes) and are usually not displayed in the UI directly. For each name, there should be a corresponding Message entity that provides the localized text. "title": A non-localizable, UI-displayable text label for an entity. """ import datetime from google.appengine.ext import db MAX_DATE = datetime.datetime(datetime.MAXYEAR, 1, 1) class Subdomain(db.Model): """A separate grouping of Subjects and SubjectTypes. Top-level entity, with no parent. Key name: unique subdomain name. In the UI, each subdomain appears to be an independent instance of the application. The permitted actions of Accounts and key_names of Subjects and SubjectTypes are namespaced by prefixing them with the subdomain name and a colon. 
All other entities are shared across all subdomains.""" pass # No properties for now; only the key_name is significant. def filter_by_prefix(query, key_name_prefix, root_kind=None): """Filters a query for key_names that have the given prefix. If root_kind is specified, filters the query for children of any entities that are of that kind with the given prefix; otherwise, the results are assumed to be top-level entities of the kind being queried.""" root_kind = root_kind or query._model_class.__name__ min_key = db.Key.from_path(root_kind, key_name_prefix) max_key = db.Key.from_path(root_kind, key_name_prefix + u'\uffff') return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key) def value_or_none(value): """Converts any false value other than 0 or 0.0 to None.""" if value or value == 0: return value return None def get_name(name_or_entity): """If given an entity, returns its name (without subdomain); if given a string, returns the string itself.""" if isinstance(name_or_entity, (Subject, SubjectType)): return name_or_entity.name elif isinstance(name_or_entity, db.Model): return name_or_entity.key().name() else: return name_or_entity class SubdomainMixin: """A mix-in class providing common methods for entities whose key names begin with a subdomain and a colon.""" @classmethod def get(cls, subdomain, name): """Gets an entity by its subdomain and name. 
This method overrides the default get() method, which takes a db.Key.""" return cls.get_by_key_name(subdomain + ':' + name) @classmethod def all_in_subdomain(cls, subdomain): """Gets a query for all entities with the given subdomain.""" root_kind = getattr(cls, 'ROOT_KIND', None) return filter_by_prefix(cls.all(), subdomain + ':', root_kind) def get_subdomain(self): """Gets the entity's subdomain.""" return self.key().name().split(':', 1)[0] subdomain = property(get_subdomain) def get_name(self): """Gets the entity's name (without the subdomain).""" return self.key().name().split(':', 1)[1] name = property(get_name) class Subject(SubdomainMixin, db.Expando): """A thing whose attributes are tracked by this application. Top-level entity, has no parent. Key name: subdomain + ':' + subject name. A subject name is a globally unique ID that starts with a domain name and a slash. In the 'haiti' subdomain, Subjects are health facilities with a government or internationally established health facility ID.""" timestamp = db.DateTimeProperty(auto_now_add=True) # creation time type = db.StringProperty(required=True) # key_name of a SubjectType, # without the subdomain prefix author = db.UserProperty() # who created this Subject # additional properties for the current value of each attribute # (named by Attribute's key_name). This denormalization is for read speed. # Consider an attribute named 'foo'. We will store 6 values here: # foo__ various types, the attribute value # foo__observed datetime, timestamp when the value was valid # foo__author users.User, the user who provided the change # foo__author_nickname string, source of the change # foo__author_affiliation string, affiliation of the source # foo__comment string, a comment about the change # These properties will exist with the following invariants: # 1. If subject.foo__ is not present, that means attribute "foo" has never # existed on this subject at any point in the past. # 2. 
If subject.foo__ is None, that means some user actually set the # attribute to "(unspecified)". # 3. All six fields are always written together at the same time, and are # never removed. (Hence either all are present or none are present.) @staticmethod def create(subdomain, subject_type_or_type_name, subject_name, author): """Creates a Subject with a given subdomain, type, name, and author.""" return Subject(key_name='%s:%s' % (subdomain, subject_name), type=get_name(subject_type_or_type_name), author=author) @staticmethod def generate_name(host, subject_type_or_type_name): """Makes a new unique subject_name for an original subject (originally created in this repository, not cloned from an external repository).""" id = UniqueId.create_id() return '%s/%s.%d' % (host, get_name(subject_type_or_type_name), id) @staticmethod @classmethod def has_value(self, attribute_name): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return hasattr(self, '%s__' % attribute_name) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def get_observed(self, attribute_name, default=None): """Returns the timestamp when the Attribute with the given key_name was valid, or default if it does not exist.""" return getattr(self, '%s__observed' % attribute_name, default) def get_author(self, attribute_name, default=None): """Returns the author of the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author' % attribute_name, default) def get_author_nickname(self, attribute_name, default=None): """Returns the author nickname of the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author_nickname' % attribute_name, default) def get_author_affiliation(self, attribute_name, default=None): 
"""Returns the affiliation of the author of the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author_affiliation' % attribute_name, default) def get_comment(self, attribute_name, default=None): """Returns the author's comment about the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__comment' % attribute_name, default) def set_attribute(self, name, value, observed, author, author_nickname, author_affiliation, comment): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) setattr(self, '%s__observed' % name, value_or_none(observed)) setattr(self, '%s__author' % name, value_or_none(author)) setattr(self, '%s__author_nickname' % name, value_or_none(author_nickname)) setattr(self, '%s__author_affiliation' % name, value_or_none(author_affiliation)) setattr(self, '%s__comment' % name, value_or_none(comment)) class MinimalSubject(SubdomainMixin, db.Expando): """Minimal version of Subject that loads fast from the datastore and contains just the information needed to populate the initial list and map. Parent: Subject. Key name: same as its parent Subject. Wouldn't be necessary if we could select columns from the datastore.""" type = db.StringProperty(required=True) # key_name of a SubjectType, # without the subdomain prefix # More properties for the current values of ONLY the most critically # important attributes of Subject (named by Attribute's key_name). # An attribute named foo will be stored as 'foo__' to match Subject. 
ROOT_KIND = 'Subject' # filter_by_prefix uses this to filter keys properly @staticmethod @staticmethod def get_by_subject(subject): """Gets the MinimalSubject entity for the given Subject.""" return MinimalSubject.all().ancestor(subject).get() @staticmethod def has_value(self, attribute_name): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return hasattr(self, '%s__' % attribute_name) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def set_attribute(self, name, value): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) class UniqueId(db.Model): """This entity is used just to generate unique numeric IDs.""" @staticmethod def create_id(): """Gets a numeric ID that is guaranteed to be different from any ID previously returned by this static method.""" unique_id = UniqueId() unique_id.put() return unique_id.key().id() class SubjectType(SubdomainMixin, db.Model): """A type of Subject, e.g. hospital, warehouse, charity, camp. Top-level entity, has no parent. Key name: subdomain + ':' + type name. A type name is an identifier used as the value of Subject.type.""" timestamp = db.DateTimeProperty(auto_now_add=True) attribute_names = db.StringListProperty() # key_names of Attribute entities minimal_attribute_names = db.StringListProperty() # key_names of # Attribute entities whose values should be copied into MinimalSubject @staticmethod class Attribute(db.Model): """An attribute of a subject, e.g. services available, # of patients. Top-level entity, has no parent. 
Key name: name of a property in a Report, and also the name of the Message providing the UI-displayable attribute name.""" timestamp = db.DateTimeProperty(auto_now_add=True) type = db.StringProperty(required=True, choices=[ 'str', # value is a single-line string (Python unicode) 'text', # value is a string, shown as long text (Python unicode) 'contact', # value is a 3-line string (name, phone, e-mail address) 'date', # value is a date (Python datetime with time 00:00:00) 'int', # value is an integer (64-bit long) 'float', # value is a float (Python float, i.e. double) 'bool', # value is a boolean 'choice', # value is a string (one of the elements in 'values') 'multi', # value is a list of strings (which are elements of 'values') 'geopt', # value is a db.GeoPt with latitude and longitude ]) edit_action = db.StringProperty() # What Account action can edit? values = db.StringListProperty() # allowed value names for choice or multi class Report(db.Expando): """A report on the attributes and resources of a Subject. Parent: Subject. A Report may represent a partial update of the attributes of the subject.""" arrived = db.DateTimeProperty(auto_now_add=True) # date we received report source = db.StringProperty() # a URL, the source of the report author = db.UserProperty() # author of the report observed = db.DateTimeProperty() # date that report contents were valid # additional properties for each Attribute (named by Attribute's key_name) # Consider an attribute named 'foo'. We will store 2 values here: # foo__ various, the attribute value # foo__comment db.StringProperty, a comment from user making the change # These properties will exist with the following invariants: # 1. If report.foo__ is not present, that means attribute "foo" should # not change its value from the previous report. # 2. If report.foo__ is None, that means some user actually set the # attribute to "(unspecified)". # 3. Both fields are always written together at the same time, and # are never removed. 
(Hence either all are present or none are present.) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def get_comment(self, attribute_name, default=None): """Returns the author's comment about the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__comment' % attribute_name, default) def set_attribute(self, name, value, comment): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) setattr(self, '%s__comment' % name, value_or_none(comment)) class Account(db.Model): """User account. Top-level entity, has no parent. Users without Account entities can use the application; for such users, their permissions are determined by the special Account object with key_name='default'. Users get their own Account entities when editing a Subject, requesting permissions, being granted permissions, or subscribing to alerts.""" timestamp = db.DateTimeProperty(auto_now_add=True) # creation time description = db.StringProperty() # full name or description email = db.StringProperty() # e-mail address of the account user_id = db.StringProperty() # users.User.id() of the account nickname = db.StringProperty() # nickname for display in the UI; may # differ from users.User.nickname() affiliation = db.StringProperty() # company or organization, etc. 
token = db.StringProperty() # secret token for looking up an Account with # no e-mail address (so we can have Account # entities for nonexistent Google Accounts) actions = db.StringListProperty() # actions allowed for this user (items # have the form subdomain + ':' + verb; # '*' is a wildcard subdomain or verb) requested_actions = db.StringListProperty() # permissions requested but # not yet granted locale = db.StringProperty() # user chosen locale # default frequency for updates default_frequency = db.StringProperty(default='instant') # preferred format to receive e-mail in email_format = db.StringProperty(choices=['plain', 'html'], default='plain') # For explanation of default settings for the next alert times, see # mail_alerts.py's send_digests function. # next time to send a daily update next_daily_alert = db.DateTimeProperty(default=MAX_DATE) # next time to send a weekly update to the user next_weekly_alert = db.DateTimeProperty(default=MAX_DATE) # next time to send a monthly update to the user next_monthly_alert = db.DateTimeProperty(default=MAX_DATE) class Message(db.Expando): """Internationalized strings for value identifiers. Top-level entity, has no parent.""" # Formerly namespace, renamed to work around bug in GAE 1.3.5 (b/2811890); # Can change back after 1.3.6, which contains the fix ns = db.StringProperty(required=True, choices=[ 'english', # name is an English string 'attribute_name', # name is an Attribute's key_name 'attribute_value', # name is a value name in a choice or multi attribute 'subject_type' # name is a SubjectType's key_name (including subdomain) ]) name = db.StringProperty() # additional properties for each language (named by locale code) class Dump(db.Model): """A record of the data received from a data source in its native format, before it was converted and loaded into the datastore. 
Top-level entity, has no parent.""" timestamp = db.DateTimeProperty(auto_now_add=True) base = db.SelfReference() # if present, this dump is a clone of base source = db.StringProperty() # URL identifying the source data = db.BlobProperty() # received raw data # TODO(kpy): Clean up the inconsistent use of the term "subject_name". # In Subscription, subject_name is the entire Subject key including the # subdomain; elsewhere it is just the part after the subdomain. class Subscription(db.Model): """A subscription by a user to receive notification when details for a facility change. Top-level entity, has no parent. Key name: follows the format subject_name:user_email""" user_email = db.StringProperty(required=True) # user to alert subject_name = db.StringProperty(required=True) # key_name of subject frequency = db.StringProperty(required=True, choices=[ 'instant', # send an alert whenever the facility is updated 'daily', # send during a daily digest e-mail 'weekly', # send during a weekly digest e-mail 'monthly' # send during a monthly digest e-mail on the 1st of the month ]) # frequency of updates for this subject @staticmethod def get(subject_name, user_email): """Gets a Subscription entity by its subject_name and e-mail.""" return Subscription.get_by_key_name(subject_name + ':' + user_email) @staticmethod def get_by_subject(subject_name): """Gets a query for all PendingAlert with the given subject name.""" return filter_by_prefix(Subscription.all(), subject_name + ':') class PendingAlert(MinimalSubject): """A pending notification for a user; waiting to be sent on a daily/weekly/ monthly basis, pending the frequency of the particular alert. Top-level entity, has no parent. 
Key name: follows the format frequency:user_email:subject_name""" user_email = db.StringProperty(required=True) # user to alert subject_name = db.StringProperty(required=True) # key_name of subject timestamp = db.DateTimeProperty() # creation time of the pending alert frequency = db.StringProperty(required=True, choices=[ 'instant', # send an alert whenever the subject is updated 'daily', # send during a daily digest e-mail 'weekly', # send during a weekly digest e-mail 'monthly' # send during a monthly digest e-mail on the 1st of the month ]) # frequency of updates for this subject @staticmethod def get(frequency, user_email, subject_name): """Gets a PendingAlert entity by its frequency, e-mail, and subject name.""" return PendingAlert.get_by_key_name(frequency + ':' + user_email + ':' + subject_name) @staticmethod def get_by_frequency(frequency, user_email): """Gets a query for all PendingAlert with the given frequency and associated user e-mail.""" return filter_by_prefix(PendingAlert.all(), frequency + ':' + user_email + ':') class MailUpdateText(db.Expando): """A map from attribute names and values to alternate values accepted in the mail editing system. They are strings that users can type into e-mails to refer to attribute names or values. Key name: follows the format namespace:name. This table should at all times contain the following special cases for general-use attribute values: true, false, and none. Accepted values are: true: ['Yes', 'y', 'true'] false: ['No', 'n', 'false'] none: ['*none'] by default. 
In total, the number of entities in the table should be equal to the number of attribute entities, plus the unique values across all multi and choice attributes, plus the 3 general values defined above.""" # name is an attribute name or attribute value; see below name = db.StringProperty(required=True) ns = db.StringProperty(required=True, choices=[ 'attribute_name', # name is an attribute's key_name 'attribute_value' # name is a value name in a choice or multi attribute ]) # Expando values should be initialized on a per-language basis as a list # of accepted input strings for this particular map, in that language. # Use the same naming format as in the Message table [en for English, # fr for French, etc.]. The first value in each list should match the # corresponding Message, to make mail editing behavior resilient to # changes in translations. For comparison purposes, each item in the # list will be treated as case-insensitive. Spaces should be used in # favor of underscores. @classmethod def get(cls, ns, name): """Gets an entity by its namespace and name.""" key_name = '%s:%s' % (ns, name) return cls.get_by_key_name(key_name) @classmethod def create(cls, ns, name, **kwargs): """Creates an entity with the specified namespace and name.""" key_name = '%s:%s' % (ns, name) return cls(key_name=key_name, ns=ns, name=name, **kwargs) @classmethod def all_in_namespace(cls, ns): """Gets a query for all entities with the given namespace.""" return filter_by_prefix(cls.all(), ns + ':')
49.217659
80
0.683925
# Copyright 2009-2010 by Ka-Ping Yee # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """The Resource Mapper data model. All entities and fields are add-only and are never deleted or overwritten. To represent modifications to a country's data, create a new Version under the appropriate Country. To represent modifications to a subject's attributes, add a new Report for that Subject. This project uses the following naming conventions in variables and properties: "key": An App Engine entity key. "name": A unique identifier for an entity, used as the entity's key_name. Fields ending in "_name" contain the key_names of other entities. Names should be C-style identifiers (thus usable as HTML element IDs, form element names, and CSS classes) and are usually not displayed in the UI directly. For each name, there should be a corresponding Message entity that provides the localized text. "title": A non-localizable, UI-displayable text label for an entity. """ import datetime from google.appengine.ext import db MAX_DATE = datetime.datetime(datetime.MAXYEAR, 1, 1) class Subdomain(db.Model): """A separate grouping of Subjects and SubjectTypes. Top-level entity, with no parent. Key name: unique subdomain name. In the UI, each subdomain appears to be an independent instance of the application. The permitted actions of Accounts and key_names of Subjects and SubjectTypes are namespaced by prefixing them with the subdomain name and a colon. 
All other entities are shared across all subdomains.""" pass # No properties for now; only the key_name is significant. def filter_by_prefix(query, key_name_prefix, root_kind=None): """Filters a query for key_names that have the given prefix. If root_kind is specified, filters the query for children of any entities that are of that kind with the given prefix; otherwise, the results are assumed to be top-level entities of the kind being queried.""" root_kind = root_kind or query._model_class.__name__ min_key = db.Key.from_path(root_kind, key_name_prefix) max_key = db.Key.from_path(root_kind, key_name_prefix + u'\uffff') return query.filter('__key__ >=', min_key).filter('__key__ <=', max_key) def value_or_none(value): """Converts any false value other than 0 or 0.0 to None.""" if value or value == 0: return value return None def get_name(name_or_entity): """If given an entity, returns its name (without subdomain); if given a string, returns the string itself.""" if isinstance(name_or_entity, (Subject, SubjectType)): return name_or_entity.name elif isinstance(name_or_entity, db.Model): return name_or_entity.key().name() else: return name_or_entity class SubdomainMixin: """A mix-in class providing common methods for entities whose key names begin with a subdomain and a colon.""" @classmethod def get(cls, subdomain, name): """Gets an entity by its subdomain and name. 
This method overrides the default get() method, which takes a db.Key.""" return cls.get_by_key_name(subdomain + ':' + name) @classmethod def all_in_subdomain(cls, subdomain): """Gets a query for all entities with the given subdomain.""" root_kind = getattr(cls, 'ROOT_KIND', None) return filter_by_prefix(cls.all(), subdomain + ':', root_kind) def get_subdomain(self): """Gets the entity's subdomain.""" return self.key().name().split(':', 1)[0] subdomain = property(get_subdomain) def get_name(self): """Gets the entity's name (without the subdomain).""" return self.key().name().split(':', 1)[1] name = property(get_name) class Subject(SubdomainMixin, db.Expando): """A thing whose attributes are tracked by this application. Top-level entity, has no parent. Key name: subdomain + ':' + subject name. A subject name is a globally unique ID that starts with a domain name and a slash. In the 'haiti' subdomain, Subjects are health facilities with a government or internationally established health facility ID.""" timestamp = db.DateTimeProperty(auto_now_add=True) # creation time type = db.StringProperty(required=True) # key_name of a SubjectType, # without the subdomain prefix author = db.UserProperty() # who created this Subject # additional properties for the current value of each attribute # (named by Attribute's key_name). This denormalization is for read speed. # Consider an attribute named 'foo'. We will store 6 values here: # foo__ various types, the attribute value # foo__observed datetime, timestamp when the value was valid # foo__author users.User, the user who provided the change # foo__author_nickname string, source of the change # foo__author_affiliation string, affiliation of the source # foo__comment string, a comment about the change # These properties will exist with the following invariants: # 1. If subject.foo__ is not present, that means attribute "foo" has never # existed on this subject at any point in the past. # 2. 
If subject.foo__ is None, that means some user actually set the # attribute to "(unspecified)". # 3. All six fields are always written together at the same time, and are # never removed. (Hence either all are present or none are present.) @staticmethod def create(subdomain, subject_type_or_type_name, subject_name, author): """Creates a Subject with a given subdomain, type, name, and author.""" return Subject(key_name='%s:%s' % (subdomain, subject_name), type=get_name(subject_type_or_type_name), author=author) @staticmethod def generate_name(host, subject_type_or_type_name): """Makes a new unique subject_name for an original subject (originally created in this repository, not cloned from an external repository).""" id = UniqueId.create_id() return '%s/%s.%d' % (host, get_name(subject_type_or_type_name), id) @staticmethod def get_stored_name(attribute_name): return '%s__' % attribute_name @classmethod def delete_complete(cls, subject): if subject: minimal_subject = MinimalSubject.get_by_subject(subject) db.delete([subject, minimal_subject]) def has_value(self, attribute_name): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return hasattr(self, '%s__' % attribute_name) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def get_observed(self, attribute_name, default=None): """Returns the timestamp when the Attribute with the given key_name was valid, or default if it does not exist.""" return getattr(self, '%s__observed' % attribute_name, default) def get_author(self, attribute_name, default=None): """Returns the author of the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author' % attribute_name, default) def get_author_nickname(self, attribute_name, default=None): """Returns the author nickname of the 
Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author_nickname' % attribute_name, default) def get_author_affiliation(self, attribute_name, default=None): """Returns the affiliation of the author of the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__author_affiliation' % attribute_name, default) def get_comment(self, attribute_name, default=None): """Returns the author's comment about the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__comment' % attribute_name, default) def set_attribute(self, name, value, observed, author, author_nickname, author_affiliation, comment): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) setattr(self, '%s__observed' % name, value_or_none(observed)) setattr(self, '%s__author' % name, value_or_none(author)) setattr(self, '%s__author_nickname' % name, value_or_none(author_nickname)) setattr(self, '%s__author_affiliation' % name, value_or_none(author_affiliation)) setattr(self, '%s__comment' % name, value_or_none(comment)) class MinimalSubject(SubdomainMixin, db.Expando): """Minimal version of Subject that loads fast from the datastore and contains just the information needed to populate the initial list and map. Parent: Subject. Key name: same as its parent Subject. Wouldn't be necessary if we could select columns from the datastore.""" type = db.StringProperty(required=True) # key_name of a SubjectType, # without the subdomain prefix # More properties for the current values of ONLY the most critically # important attributes of Subject (named by Attribute's key_name). # An attribute named foo will be stored as 'foo__' to match Subject. 
ROOT_KIND = 'Subject' # filter_by_prefix uses this to filter keys properly @staticmethod def create(subject): return MinimalSubject( subject, key_name=subject.key().name(), type=subject.type) @staticmethod def get_by_subject(subject): """Gets the MinimalSubject entity for the given Subject.""" return MinimalSubject.all().ancestor(subject).get() @staticmethod def get_stored_name(attribute_name): return '%s__' % attribute_name def has_value(self, attribute_name): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return hasattr(self, '%s__' % attribute_name) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def set_attribute(self, name, value): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) class UniqueId(db.Model): """This entity is used just to generate unique numeric IDs.""" @staticmethod def create_id(): """Gets a numeric ID that is guaranteed to be different from any ID previously returned by this static method.""" unique_id = UniqueId() unique_id.put() return unique_id.key().id() class SubjectType(SubdomainMixin, db.Model): """A type of Subject, e.g. hospital, warehouse, charity, camp. Top-level entity, has no parent. Key name: subdomain + ':' + type name. A type name is an identifier used as the value of Subject.type.""" timestamp = db.DateTimeProperty(auto_now_add=True) attribute_names = db.StringListProperty() # key_names of Attribute entities minimal_attribute_names = db.StringListProperty() # key_names of # Attribute entities whose values should be copied into MinimalSubject @staticmethod def create(subdomain, subject_type_name): return SubjectType(key_name=subdomain + ':' + subject_type_name) class Attribute(db.Model): """An attribute of a subject, e.g. services available, # of patients. 
Top-level entity, has no parent. Key name: name of a property in a Report, and also the name of the Message providing the UI-displayable attribute name.""" timestamp = db.DateTimeProperty(auto_now_add=True) type = db.StringProperty(required=True, choices=[ 'str', # value is a single-line string (Python unicode) 'text', # value is a string, shown as long text (Python unicode) 'contact', # value is a 3-line string (name, phone, e-mail address) 'date', # value is a date (Python datetime with time 00:00:00) 'int', # value is an integer (64-bit long) 'float', # value is a float (Python float, i.e. double) 'bool', # value is a boolean 'choice', # value is a string (one of the elements in 'values') 'multi', # value is a list of strings (which are elements of 'values') 'geopt', # value is a db.GeoPt with latitude and longitude ]) edit_action = db.StringProperty() # What Account action can edit? values = db.StringListProperty() # allowed value names for choice or multi class Report(db.Expando): """A report on the attributes and resources of a Subject. Parent: Subject. A Report may represent a partial update of the attributes of the subject.""" arrived = db.DateTimeProperty(auto_now_add=True) # date we received report source = db.StringProperty() # a URL, the source of the report author = db.UserProperty() # author of the report observed = db.DateTimeProperty() # date that report contents were valid # additional properties for each Attribute (named by Attribute's key_name) # Consider an attribute named 'foo'. We will store 2 values here: # foo__ various, the attribute value # foo__comment db.StringProperty, a comment from user making the change # These properties will exist with the following invariants: # 1. If report.foo__ is not present, that means attribute "foo" should # not change its value from the previous report. # 2. If report.foo__ is None, that means some user actually set the # attribute to "(unspecified)". # 3. 
Both fields are always written together at the same time, and # are never removed. (Hence either all are present or none are present.) def get_value(self, attribute_name, default=None): """Returns the value of the Attribute with the given key_name, or default if it does not exist.""" return getattr(self, '%s__' % attribute_name, default) def get_comment(self, attribute_name, default=None): """Returns the author's comment about the Attribute value with the given key_name, or default if it does not exist.""" return getattr(self, '%s__comment' % attribute_name, default) def set_attribute(self, name, value, comment): """Sets the value for the Attribute with the given key_name.""" setattr(self, '%s__' % name, value_or_none(value)) setattr(self, '%s__comment' % name, value_or_none(comment)) class Account(db.Model): """User account. Top-level entity, has no parent. Users without Account entities can use the application; for such users, their permissions are determined by the special Account object with key_name='default'. Users get their own Account entities when editing a Subject, requesting permissions, being granted permissions, or subscribing to alerts.""" timestamp = db.DateTimeProperty(auto_now_add=True) # creation time description = db.StringProperty() # full name or description email = db.StringProperty() # e-mail address of the account user_id = db.StringProperty() # users.User.id() of the account nickname = db.StringProperty() # nickname for display in the UI; may # differ from users.User.nickname() affiliation = db.StringProperty() # company or organization, etc. 
token = db.StringProperty() # secret token for looking up an Account with # no e-mail address (so we can have Account # entities for nonexistent Google Accounts) actions = db.StringListProperty() # actions allowed for this user (items # have the form subdomain + ':' + verb; # '*' is a wildcard subdomain or verb) requested_actions = db.StringListProperty() # permissions requested but # not yet granted locale = db.StringProperty() # user chosen locale # default frequency for updates default_frequency = db.StringProperty(default='instant') # preferred format to receive e-mail in email_format = db.StringProperty(choices=['plain', 'html'], default='plain') # For explanation of default settings for the next alert times, see # mail_alerts.py's send_digests function. # next time to send a daily update next_daily_alert = db.DateTimeProperty(default=MAX_DATE) # next time to send a weekly update to the user next_weekly_alert = db.DateTimeProperty(default=MAX_DATE) # next time to send a monthly update to the user next_monthly_alert = db.DateTimeProperty(default=MAX_DATE) class Message(db.Expando): """Internationalized strings for value identifiers. Top-level entity, has no parent.""" # Formerly namespace, renamed to work around bug in GAE 1.3.5 (b/2811890); # Can change back after 1.3.6, which contains the fix ns = db.StringProperty(required=True, choices=[ 'english', # name is an English string 'attribute_name', # name is an Attribute's key_name 'attribute_value', # name is a value name in a choice or multi attribute 'subject_type' # name is a SubjectType's key_name (including subdomain) ]) name = db.StringProperty() # additional properties for each language (named by locale code) class Dump(db.Model): """A record of the data received from a data source in its native format, before it was converted and loaded into the datastore. 
Top-level entity, has no parent.""" timestamp = db.DateTimeProperty(auto_now_add=True) base = db.SelfReference() # if present, this dump is a clone of base source = db.StringProperty() # URL identifying the source data = db.BlobProperty() # received raw data # TODO(kpy): Clean up the inconsistent use of the term "subject_name". # In Subscription, subject_name is the entire Subject key including the # subdomain; elsewhere it is just the part after the subdomain. class Subscription(db.Model): """A subscription by a user to receive notification when details for a facility change. Top-level entity, has no parent. Key name: follows the format subject_name:user_email""" user_email = db.StringProperty(required=True) # user to alert subject_name = db.StringProperty(required=True) # key_name of subject frequency = db.StringProperty(required=True, choices=[ 'instant', # send an alert whenever the facility is updated 'daily', # send during a daily digest e-mail 'weekly', # send during a weekly digest e-mail 'monthly' # send during a monthly digest e-mail on the 1st of the month ]) # frequency of updates for this subject @staticmethod def get(subject_name, user_email): """Gets a Subscription entity by its subject_name and e-mail.""" return Subscription.get_by_key_name(subject_name + ':' + user_email) @staticmethod def get_by_subject(subject_name): """Gets a query for all PendingAlert with the given subject name.""" return filter_by_prefix(Subscription.all(), subject_name + ':') class PendingAlert(MinimalSubject): """A pending notification for a user; waiting to be sent on a daily/weekly/ monthly basis, pending the frequency of the particular alert. Top-level entity, has no parent. 
Key name: follows the format frequency:user_email:subject_name""" user_email = db.StringProperty(required=True) # user to alert subject_name = db.StringProperty(required=True) # key_name of subject timestamp = db.DateTimeProperty() # creation time of the pending alert frequency = db.StringProperty(required=True, choices=[ 'instant', # send an alert whenever the subject is updated 'daily', # send during a daily digest e-mail 'weekly', # send during a weekly digest e-mail 'monthly' # send during a monthly digest e-mail on the 1st of the month ]) # frequency of updates for this subject @staticmethod def get(frequency, user_email, subject_name): """Gets a PendingAlert entity by its frequency, e-mail, and subject name.""" return PendingAlert.get_by_key_name(frequency + ':' + user_email + ':' + subject_name) @staticmethod def get_by_frequency(frequency, user_email): """Gets a query for all PendingAlert with the given frequency and associated user e-mail.""" return filter_by_prefix(PendingAlert.all(), frequency + ':' + user_email + ':') class MailUpdateText(db.Expando): """A map from attribute names and values to alternate values accepted in the mail editing system. They are strings that users can type into e-mails to refer to attribute names or values. Key name: follows the format namespace:name. This table should at all times contain the following special cases for general-use attribute values: true, false, and none. Accepted values are: true: ['Yes', 'y', 'true'] false: ['No', 'n', 'false'] none: ['*none'] by default. 
In total, the number of entities in the table should be equal to the number of attribute entities, plus the unique values across all multi and choice attributes, plus the 3 general values defined above.""" # name is an attribute name or attribute value; see below name = db.StringProperty(required=True) ns = db.StringProperty(required=True, choices=[ 'attribute_name', # name is an attribute's key_name 'attribute_value' # name is a value name in a choice or multi attribute ]) # Expando values should be initialized on a per-language basis as a list # of accepted input strings for this particular map, in that language. # Use the same naming format as in the Message table [en for English, # fr for French, etc.]. The first value in each list should match the # corresponding Message, to make mail editing behavior resilient to # changes in translations. For comparison purposes, each item in the # list will be treated as case-insensitive. Spaces should be used in # favor of underscores. @classmethod def get(cls, ns, name): """Gets an entity by its namespace and name.""" key_name = '%s:%s' % (ns, name) return cls.get_by_key_name(key_name) @classmethod def create(cls, ns, name, **kwargs): """Creates an entity with the specified namespace and name.""" key_name = '%s:%s' % (ns, name) return cls(key_name=key_name, ns=ns, name=name, **kwargs) @classmethod def all_in_namespace(cls, ns): """Gets a query for all entities with the given namespace.""" return filter_by_prefix(cls.all(), ns + ':')
455
0
130
d9b6dacb47bb3bb1903c75fd6a311e2b090af877
12,452
py
Python
rllib/policy/torch_policy.py
brechtmann/ray
0c76ebd676f794847ea990aecced22b88717d09e
[ "Apache-2.0" ]
null
null
null
rllib/policy/torch_policy.py
brechtmann/ray
0c76ebd676f794847ea990aecced22b88717d09e
[ "Apache-2.0" ]
null
null
null
rllib/policy/torch_policy.py
brechtmann/ray
0c76ebd676f794847ea990aecced22b88717d09e
[ "Apache-2.0" ]
null
null
null
import numpy as np import time from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY, ACTION_PROB, \ ACTION_LOGP from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override, DeveloperAPI from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule from ray.rllib.utils.torch_ops import convert_to_non_torch_type from ray.rllib.utils.tracking_dict import UsageTrackingDict torch, _ = try_import_torch() class TorchPolicy(Policy): """Template for a PyTorch policy and loss to use with RLlib. This is similar to TFPolicy, but for PyTorch. Attributes: observation_space (gym.Space): observation space of the policy. action_space (gym.Space): action space of the policy. config (dict): config of the policy. model (TorchModel): Torch model instance. dist_class (type): Torch action distribution class. """ def __init__(self, observation_space, action_space, config, model, loss, action_distribution_class): """Build a policy from policy and loss torch modules. Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES is set. Only single GPU is supported for now. Arguments: observation_space (gym.Space): observation space of the policy. action_space (gym.Space): action space of the policy. config (dict): The Policy config dict. model (nn.Module): PyTorch policy module. Given observations as input, this module must return a list of outputs where the first item is action logits, and the rest can be any value. loss (func): Function that takes (policy, model, dist_class, train_batch) and returns a single scalar loss. action_distribution_class (ActionDistribution): Class for action distribution. 
""" self.framework = "torch" super().__init__(observation_space, action_space, config) self.device = (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")) self.model = model.to(self.device) self.unwrapped_model = model # used to support DistributedDataParallel self._loss = loss self._optimizer = self.optimizer() self.dist_class = action_distribution_class # If set, means we are using distributed allreduce during learning. self.distributed_world_size = None @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) @override(Policy) def extra_grad_process(self): """Allow subclass to do extra processing on gradients and return processing info.""" return {} def extra_action_out(self, input_dict, state_batches, model, action_dist=None): """Returns dict of extra info to include in experience batch. Arguments: input_dict (dict): Dict of model input tensors. state_batches (list): List of state tensors. model (TorchModelV2): Reference to the model. action_dist (Distribution): Torch Distribution object to get log-probs (e.g. for already sampled actions). """ return {} def extra_grad_info(self, train_batch): """Return dict of extra grad info.""" return {} def optimizer(self): """Custom PyTorch optimizer to use.""" if hasattr(self, "config"): return torch.optim.Adam( self.model.parameters(), lr=self.config["lr"]) else: return torch.optim.Adam(self.model.parameters()) @override(Policy) def export_model(self, export_dir): """TODO: implement for torch. """ raise NotImplementedError @override(Policy) def export_checkpoint(self, export_dir): """TODO: implement for torch. 
""" raise NotImplementedError @DeveloperAPI class LearningRateSchedule: """Mixin for TFPolicy that adds a learning rate schedule.""" @DeveloperAPI @override(Policy) @override(TorchPolicy) @DeveloperAPI class EntropyCoeffSchedule: """Mixin for TorchPolicy that adds entropy coeff decay.""" @DeveloperAPI @override(Policy)
37.963415
79
0.608336
import numpy as np import time from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY, ACTION_PROB, \ ACTION_LOGP from ray.rllib.policy.sample_batch import SampleBatch from ray.rllib.utils.annotations import override, DeveloperAPI from ray.rllib.utils.framework import try_import_torch from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule from ray.rllib.utils.torch_ops import convert_to_non_torch_type from ray.rllib.utils.tracking_dict import UsageTrackingDict torch, _ = try_import_torch() class TorchPolicy(Policy): """Template for a PyTorch policy and loss to use with RLlib. This is similar to TFPolicy, but for PyTorch. Attributes: observation_space (gym.Space): observation space of the policy. action_space (gym.Space): action space of the policy. config (dict): config of the policy. model (TorchModel): Torch model instance. dist_class (type): Torch action distribution class. """ def __init__(self, observation_space, action_space, config, model, loss, action_distribution_class): """Build a policy from policy and loss torch modules. Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES is set. Only single GPU is supported for now. Arguments: observation_space (gym.Space): observation space of the policy. action_space (gym.Space): action space of the policy. config (dict): The Policy config dict. model (nn.Module): PyTorch policy module. Given observations as input, this module must return a list of outputs where the first item is action logits, and the rest can be any value. loss (func): Function that takes (policy, model, dist_class, train_batch) and returns a single scalar loss. action_distribution_class (ActionDistribution): Class for action distribution. 
""" self.framework = "torch" super().__init__(observation_space, action_space, config) self.device = (torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")) self.model = model.to(self.device) self.unwrapped_model = model # used to support DistributedDataParallel self._loss = loss self._optimizer = self.optimizer() self.dist_class = action_distribution_class # If set, means we are using distributed allreduce during learning. self.distributed_world_size = None @override(Policy) def compute_actions(self, obs_batch, state_batches=None, prev_action_batch=None, prev_reward_batch=None, info_batch=None, episodes=None, explore=None, timestep=None, **kwargs): explore = explore if explore is not None else self.config["explore"] with torch.no_grad(): input_dict = self._lazy_tensor_dict({ SampleBatch.CUR_OBS: obs_batch, }) if prev_action_batch: input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch if prev_reward_batch: input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch state_batches = [self._convert_to_tensor(s) for s in state_batches] model_out = self.model(input_dict, state_batches, self._convert_to_tensor([1])) logits, state = model_out action_dist = None actions, logp = \ self.exploration.get_exploration_action( logits, self.dist_class, self.model, timestep if timestep is not None else self.global_timestep, explore) input_dict[SampleBatch.ACTIONS] = actions extra_action_out = self.extra_action_out(input_dict, state_batches, self.model, action_dist) if logp is not None: logp = convert_to_non_torch_type(logp) extra_action_out.update({ ACTION_PROB: np.exp(logp), ACTION_LOGP: logp }) return convert_to_non_torch_type( (actions, state, extra_action_out)) @override(Policy) def compute_log_likelihoods(self, actions, obs_batch, state_batches=None, prev_action_batch=None, prev_reward_batch=None): with torch.no_grad(): input_dict = self._lazy_tensor_dict({ SampleBatch.CUR_OBS: obs_batch, SampleBatch.ACTIONS: actions }) if prev_action_batch: 
input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch if prev_reward_batch: input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch parameters, _ = self.model(input_dict, state_batches, [1]) action_dist = self.dist_class(parameters, self.model) log_likelihoods = action_dist.logp(input_dict[SampleBatch.ACTIONS]) return log_likelihoods @override(Policy) def learn_on_batch(self, postprocessed_batch): train_batch = self._lazy_tensor_dict(postprocessed_batch) loss_out = self._loss(self, self.model, self.dist_class, train_batch) self._optimizer.zero_grad() loss_out.backward() info = {} info.update(self.extra_grad_process()) if self.distributed_world_size: grads = [] for p in self.model.parameters(): if p.grad is not None: grads.append(p.grad) start = time.time() if torch.cuda.is_available(): # Sadly, allreduce_coalesced does not work with CUDA yet. for g in grads: torch.distributed.all_reduce( g, op=torch.distributed.ReduceOp.SUM) else: torch.distributed.all_reduce_coalesced( grads, op=torch.distributed.ReduceOp.SUM) for p in self.model.parameters(): if p.grad is not None: p.grad /= self.distributed_world_size info["allreduce_latency"] = time.time() - start self._optimizer.step() info.update(self.extra_grad_info(train_batch)) return {LEARNER_STATS_KEY: info} @override(Policy) def compute_gradients(self, postprocessed_batch): train_batch = self._lazy_tensor_dict(postprocessed_batch) loss_out = self._loss(self, self.model, self.dist_class, train_batch) self._optimizer.zero_grad() loss_out.backward() grad_process_info = self.extra_grad_process() # Note that return values are just references; # calling zero_grad will modify the values grads = [] for p in self.model.parameters(): if p.grad is not None: grads.append(p.grad.data.cpu().numpy()) else: grads.append(None) grad_info = self.extra_grad_info(train_batch) grad_info.update(grad_process_info) return grads, {LEARNER_STATS_KEY: grad_info} @override(Policy) def apply_gradients(self, gradients): for g, p in zip(gradients, 
self.model.parameters()): if g is not None: p.grad = torch.from_numpy(g).to(self.device) self._optimizer.step() @override(Policy) def get_weights(self): return {k: v.cpu() for k, v in self.model.state_dict().items()} @override(Policy) def set_weights(self, weights): self.model.load_state_dict(weights) @override(Policy) def is_recurrent(self): return len(self.model.get_initial_state()) > 0 @override(Policy) def num_state_tensors(self): return len(self.model.get_initial_state()) @override(Policy) def get_initial_state(self): return [s.numpy() for s in self.model.get_initial_state()] def extra_grad_process(self): """Allow subclass to do extra processing on gradients and return processing info.""" return {} def extra_action_out(self, input_dict, state_batches, model, action_dist=None): """Returns dict of extra info to include in experience batch. Arguments: input_dict (dict): Dict of model input tensors. state_batches (list): List of state tensors. model (TorchModelV2): Reference to the model. action_dist (Distribution): Torch Distribution object to get log-probs (e.g. for already sampled actions). """ return {} def extra_grad_info(self, train_batch): """Return dict of extra grad info.""" return {} def optimizer(self): """Custom PyTorch optimizer to use.""" if hasattr(self, "config"): return torch.optim.Adam( self.model.parameters(), lr=self.config["lr"]) else: return torch.optim.Adam(self.model.parameters()) def _lazy_tensor_dict(self, postprocessed_batch): train_batch = UsageTrackingDict(postprocessed_batch) train_batch.set_get_interceptor(self._convert_to_tensor) return train_batch def _convert_to_tensor(self, arr): if torch.is_tensor(arr): return arr.to(self.device) tensor = torch.from_numpy(np.asarray(arr)) if tensor.dtype == torch.double: tensor = tensor.float() return tensor.to(self.device) @override(Policy) def export_model(self, export_dir): """TODO: implement for torch. 
""" raise NotImplementedError @override(Policy) def export_checkpoint(self, export_dir): """TODO: implement for torch. """ raise NotImplementedError @DeveloperAPI class LearningRateSchedule: """Mixin for TFPolicy that adds a learning rate schedule.""" @DeveloperAPI def __init__(self, lr, lr_schedule): self.cur_lr = lr if lr_schedule is None: self.lr_schedule = ConstantSchedule(lr, framework=None) else: self.lr_schedule = PiecewiseSchedule( lr_schedule, outside_value=lr_schedule[-1][-1], framework=None) @override(Policy) def on_global_var_update(self, global_vars): super(LearningRateSchedule, self).on_global_var_update(global_vars) self.cur_lr = self.lr_schedule.value(global_vars["timestep"]) @override(TorchPolicy) def optimizer(self): for p in self._optimizer.param_groups: p["lr"] = self.cur_lr return self._optimizer @DeveloperAPI class EntropyCoeffSchedule: """Mixin for TorchPolicy that adds entropy coeff decay.""" @DeveloperAPI def __init__(self, entropy_coeff, entropy_coeff_schedule): self.entropy_coeff = entropy_coeff if entropy_coeff_schedule is None: self.entropy_coeff_schedule = ConstantSchedule( entropy_coeff, framework=None) else: # Allows for custom schedule similar to lr_schedule format if isinstance(entropy_coeff_schedule, list): self.entropy_coeff_schedule = PiecewiseSchedule( entropy_coeff_schedule, outside_value=entropy_coeff_schedule[-1][-1], framework=None) else: # Implements previous version but enforces outside_value self.entropy_coeff_schedule = PiecewiseSchedule( [[0, entropy_coeff], [entropy_coeff_schedule, 0.0]], outside_value=0.0, framework=None) @override(Policy) def on_global_var_update(self, global_vars): super(EntropyCoeffSchedule, self).on_global_var_update(global_vars) self.entropy_coeff = self.entropy_coeff_schedule.value( global_vars["timestep"])
7,402
0
444
3788688439f869905b054a77ecac95a103f926f6
1,297
py
Python
sshd_telegram_alert/__main__.py
nanih98/sshd-telegram-alert
7f5adb662658bcf3dc9020113f6133b1184a971b
[ "MIT" ]
null
null
null
sshd_telegram_alert/__main__.py
nanih98/sshd-telegram-alert
7f5adb662658bcf3dc9020113f6133b1184a971b
[ "MIT" ]
null
null
null
sshd_telegram_alert/__main__.py
nanih98/sshd-telegram-alert
7f5adb662658bcf3dc9020113f6133b1184a971b
[ "MIT" ]
null
null
null
import os from .parser import parse_args from .logger import Logger from .configuration import Configuration from .requester import Requester import platform def main() -> None: """Main function where the program start""" args = parse_args() config_path = check_os_path() log = Logger(debug_flag=True) log.success("Starting the program") config = Configuration() # config.check_os() config.create_config(args, config_path, platform.system()) message = config.message(args) requester = Requester() requester.requester(args, config_path, message) #requester.requester(config_path, message, args, platform.system()) # # Initzialize logger # logging.basicConfig( # format="%(asctime)-5s %(name)-15s %(levelname)-8s %(message)s", # level = args.log_level, # #filename='/tmp/python-telegram-bot.log', # encoding='utf-8' # ) # Start program if __name__ == "__main__": main()
25.94
86
0.656901
import os
from .parser import parse_args
from .logger import Logger
from .configuration import Configuration
from .requester import Requester
import platform


def check_os_path():
    """Return the path of the Telegram credentials file for the current OS.

    On macOS (Darwin) the file lives in the user's home directory and the
    resolved path is printed; on any other system a fixed path under
    /etc/ssh is used.
    """
    if platform.system() == "Darwin":
        # Build the path once instead of constructing the same expression
        # twice (the original computed it for the print and again for the
        # return).
        # NOTE(review): os.environ.get('HOME') may return None, which would
        # make os.path.join raise TypeError -- TODO confirm HOME is always
        # set in the environments this runs in.
        path = os.path.join(os.environ.get('HOME'), ".sshd-telegram-credentials.json")
        print(path)  # kept: the original echoed the resolved macOS path
        return path
    return "/etc/ssh/.sshd-telegram-credentials.json"


def main() -> None:
    """Main function where the program start"""
    args = parse_args()
    config_path = check_os_path()
    log = Logger(debug_flag=True)
    log.success("Starting the program")
    config = Configuration()
    # config.check_os()
    config.create_config(args, config_path, platform.system())
    message = config.message(args)
    requester = Requester()
    # Send the notification built from the parsed SSH login details.
    requester.requester(args, config_path, message)
    #requester.requester(config_path, message, args, platform.system())

    # # Initzialize logger
    # logging.basicConfig(
    #     format="%(asctime)-5s %(name)-15s %(levelname)-8s %(message)s",
    #     level = args.log_level,
    #     #filename='/tmp/python-telegram-bot.log',
    #     encoding='utf-8'
    #     )


# Start program
if __name__ == "__main__":
    main()
293
0
23
5444b2b64f6468f6af4c6c70f4231a23dad1d247
4,773
py
Python
bans/database/db_bans.py
examknow/bans
2a405788770090e552e605e00e70cfb2cf910ac1
[ "MIT" ]
null
null
null
bans/database/db_bans.py
examknow/bans
2a405788770090e552e605e00e70cfb2cf910ac1
[ "MIT" ]
null
null
null
bans/database/db_bans.py
examknow/bans
2a405788770090e552e605e00e70cfb2cf910ac1
[ "MIT" ]
null
null
null
from aiosqlite import connect as db_connect from time import time from typing import List, Optional from dataclasses import dataclass from .common import DBTable @dataclass
30.208861
100
0.503247
from aiosqlite import connect as db_connect
from time import time
from typing import List, Optional
from dataclasses import dataclass

from .common import DBTable


@dataclass
class DBBan(object):
    # One row of the "bans" table, in SELECT column order.
    id: int
    channelid: int
    setter: str
    mode: str
    ts: int
    mask: Optional[int]
    expiry: Optional[int]
    removed: Optional[int]
    remover: Optional[str]
    reason: Optional[str]


class BansTable(DBTable):
    """Async accessors for the "bans" table (a fresh connection per call)."""

    async def add(self,
            channel: int,
            setter: str,
            mode: str,
            mask: Optional[str] = None,
            expiry: Optional[int] = None,
            reason: Optional[str] = None):
        """Insert a new ban and return the id of the created row."""
        async with db_connect(self._db_location) as db:
            await db.execute("""
                INSERT INTO bans (channel_id, setter, mode, mask, ts, expiry_ts, reason)
                VALUES (?, ?, ?, ?, ?, ?, ?)
                """, [channel, setter, mode, mask, int(time()), expiry, reason])
            await db.commit()
            cursor = await db.execute("""
                SELECT id FROM bans
                ORDER BY id DESC
                LIMIT 1
                """)
            return (await cursor.fetchone())[0]

    async def _get(self,
            where: str,
            limit: Optional[int],
            *args: str) -> List[DBBan]:
        """Run a SELECT with the given WHERE clause/args and wrap rows in DBBan."""
        async with db_connect(self._db_location) as db:
            limit_str = ""
            if limit is not None:
                limit_str = f"LIMIT {limit}"
            query = f"""
                SELECT id, channel_id, setter, mode, ts, mask, expiry_ts,
                    remove_ts, remover, reason
                FROM bans
                {where}
                {limit_str}
                """
            cursor = await db.execute(query, args)
            rows = await cursor.fetchall()
            return [DBBan(*row) for row in rows]

    async def get_by_id(self, id: int) -> List[DBBan]:
        """Return the ban with the given id, or None if it does not exist."""
        try:
            return (await self._get("WHERE id = ?", 1, id))[0]
        except IndexError:
            return None

    async def get_by_channel(self,
            channel: int,
            by_active: Optional[bool] = None,
            by_setter: Optional[str] = None,
            limit: Optional[int] = 10) -> List[DBBan]:
        """Return bans for a channel, optionally filtered by activity/setter."""
        args: List[str] = []
        where = "WHERE channel_id = ?"
        args.append(channel)
        if by_active is not None:
            # by_active True -> still-active bans; False -> removed bans.
            where += by_active == True and " AND remove_ts IS NULL" or " AND remove_ts IS NOT NULL"
        if by_setter is not None:
            where += f" AND setter LIKE ?"
            args.append(by_setter)
        return await self._get(where, limit, *args)

    async def get_expired(self) -> List[DBBan]:
        """Return active bans whose expiry timestamp has already passed."""
        return await self._get("WHERE expiry_ts < ? AND remove_ts IS NULL", None, int(time()))

    async def get_last_by_setter(self, setter: str, count: int) -> List[DBBan]:
        """Return up to `count` active bans most recently set by `setter`."""
        return await self._get("WHERE setter = ? AND remove_ts IS NULL", count, setter)

    async def get_id(self, channel: str, mask: str) -> Optional[DBBan]:
        """Return the id of the most recent active ban, or None if none exist."""
        # NOTE(review): the `channel` and `mask` parameters are currently not
        # used in the query -- TODO confirm whether they should filter it.
        async with db_connect(self._db_location) as db:
            cursor = await db.execute("""
                SELECT id FROM bans
                WHERE remove_ts IS NULL
                ORDER BY id DESC
                LIMIT 1""")
            res = await cursor.fetchone()
            # Fix: fetchone() returns None when there are no rows, which made
            # the previous `len(res)` check raise TypeError.
            if res is not None:
                return res[0]
            else:
                return None

    async def set_reason(self, id: int, reason: str):
        """Update the reason recorded for a ban."""
        async with db_connect(self._db_location) as db:
            await db.execute("""
                UPDATE bans
                SET reason = ?
                WHERE id = ?""", [reason, id])
            await db.commit()

    async def set_expiry(self, id: int, expiry: Optional[int]):
        """Update (or clear, with None) the expiry timestamp of a ban."""
        async with db_connect(self._db_location) as db:
            await db.execute("""
                UPDATE bans
                SET expiry_ts = ?
                WHERE id = ?""", [expiry, id])
            await db.commit()

    # Fix: a second, byte-identical definition of set_reason used to follow
    # set_expiry here; it silently shadowed the first one and was removed.

    async def remove(self, id: int, remover: Optional[str] = None):
        """Mark a ban as removed, recording the removal time and remover."""
        async with db_connect(self._db_location) as db:
            await db.execute("""
                UPDATE bans
                SET remove_ts = ?, remover = ?
                WHERE id = ?""", [int(time()), remover, id])
            await db.commit()
4,035
206
341
077847e78c05efc885c4770c62e1e3d7fa340596
6,461
py
Python
azure-mgmt-search/azure/mgmt/search/models/search_service.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2021-09-07T18:36:04.000Z
2021-09-07T18:36:04.000Z
azure-mgmt-search/azure/mgmt/search/models/search_service.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
2
2019-10-02T23:37:38.000Z
2020-10-02T01:17:31.000Z
azure-mgmt-search/azure/mgmt/search/models/search_service.py
JonathanGailliez/azure-sdk-for-python
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
[ "MIT" ]
1
2018-08-28T14:36:47.000Z
2018-08-28T14:36:47.000Z
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class SearchService(Resource):
    """Describes an Azure Search service and its current state.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The ID of the resource. This can be used with the Azure
     Resource Manager to link resources together.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    :param location: The geographic location of the resource. This must be
     one of the supported and registered Azure Geo Regions (for example, West
     US, East US, Southeast Asia, and so forth). This property is required
     when creating a new resource.
    :type location: str
    :param tags: Tags to help categorize the resource in the Azure portal.
    :type tags: dict[str, str]
    :param replica_count: The number of replicas in the Search service. If
     specified, it must be a value between 1 and 12 inclusive for standard
     SKUs or between 1 and 3 inclusive for basic SKU. Default value: 1 .
    :type replica_count: int
    :param partition_count: The number of partitions in the Search service;
     if specified, it can be 1, 2, 3, 4, 6, or 12. Values greater than 1 are
     only valid for standard SKUs. For 'standard3' services with hostingMode
     set to 'highDensity', the allowed values are between 1 and 3. Default
     value: 1 .
    :type partition_count: int
    :param hosting_mode: Applicable only for the standard3 SKU. You can set
     this property to enable up to 3 high density partitions that allow up to
     1000 indexes, which is much higher than the maximum indexes allowed for
     any other SKU. For the standard3 SKU, the value is either 'default' or
     'highDensity'. For all other SKUs, this value must be 'default'.
     Possible values include: 'default', 'highDensity'. Default value:
     "default" .
    :type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
    :ivar status: The status of the Search service. Possible values include:
     'running': The Search service is running and no provisioning operations
     are underway. 'provisioning': The Search service is being provisioned or
     scaled up or down. 'deleting': The Search service is being deleted.
     'degraded': The Search service is degraded. This can occur when the
     underlying search units are not healthy. The Search service is most
     likely operational, but performance might be slow and some requests
     might be dropped. 'disabled': The Search service is disabled. In this
     state, the service will reject all API requests. 'error': The Search
     service is in an error state. If your service is in the degraded,
     disabled, or error states, it means the Azure Search team is actively
     investigating the underlying issue. Dedicated services in these states
     are still chargeable based on the number of search units provisioned.
     Possible values include: 'running', 'provisioning', 'deleting',
     'degraded', 'disabled', 'error'
    :vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
    :ivar status_details: The details of the Search service status.
    :vartype status_details: str
    :ivar provisioning_state: The state of the last provisioning operation
     performed on the Search service. Provisioning is an intermediate state
     that occurs while service capacity is being established. After capacity
     is set up, provisioningState changes to either 'succeeded' or 'failed'.
     Client applications can poll provisioning status (the recommended
     polling interval is from 30 seconds to one minute) by using the Get
     Search Service operation to see when an operation is completed. If you
     are using the free service, this value tends to come back as 'succeeded'
     directly in the call to Create Search service. This is because the free
     service uses capacity that is already set up. Possible values include:
     'succeeded', 'provisioning', 'failed'
    :vartype provisioning_state: str or
     ~azure.mgmt.search.models.ProvisioningState
    :param sku: The SKU of the Search Service, which determines price tier
     and capacity limits. This property is required when creating a new
     Search Service.
    :type sku: ~azure.mgmt.search.models.Sku
    """

    # Serialization constraints: fields marked readonly are ignored when
    # sending a request; numeric bounds mirror the documented service limits.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'replica_count': {'maximum': 12, 'minimum': 1},
        'partition_count': {'maximum': 12, 'minimum': 1},
        'status': {'readonly': True},
        'status_details': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names to their JSON wire paths and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
        'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
        'hosting_mode': {'key': 'properties.hostingMode', 'type': 'HostingMode'},
        'status': {'key': 'properties.status', 'type': 'SearchServiceStatus'},
        'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
        'sku': {'key': 'sku', 'type': 'Sku'},
    }
52.959016
99
0.67358
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .resource import Resource


class SearchService(Resource):
    """Describes an Azure Search service and its current state.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: The ID of the resource. This can be used with the Azure
     Resource Manager to link resources together.
    :vartype id: str
    :ivar name: The name of the resource.
    :vartype name: str
    :ivar type: The resource type.
    :vartype type: str
    :param location: The geographic location of the resource. This must be
     one of the supported and registered Azure Geo Regions (for example, West
     US, East US, Southeast Asia, and so forth). This property is required
     when creating a new resource.
    :type location: str
    :param tags: Tags to help categorize the resource in the Azure portal.
    :type tags: dict[str, str]
    :param replica_count: The number of replicas in the Search service. If
     specified, it must be a value between 1 and 12 inclusive for standard
     SKUs or between 1 and 3 inclusive for basic SKU. Default value: 1 .
    :type replica_count: int
    :param partition_count: The number of partitions in the Search service;
     if specified, it can be 1, 2, 3, 4, 6, or 12. Values greater than 1 are
     only valid for standard SKUs. For 'standard3' services with hostingMode
     set to 'highDensity', the allowed values are between 1 and 3. Default
     value: 1 .
    :type partition_count: int
    :param hosting_mode: Applicable only for the standard3 SKU. You can set
     this property to enable up to 3 high density partitions that allow up to
     1000 indexes, which is much higher than the maximum indexes allowed for
     any other SKU. For the standard3 SKU, the value is either 'default' or
     'highDensity'. For all other SKUs, this value must be 'default'.
     Possible values include: 'default', 'highDensity'. Default value:
     "default" .
    :type hosting_mode: str or ~azure.mgmt.search.models.HostingMode
    :ivar status: The status of the Search service. Possible values include:
     'running': The Search service is running and no provisioning operations
     are underway. 'provisioning': The Search service is being provisioned or
     scaled up or down. 'deleting': The Search service is being deleted.
     'degraded': The Search service is degraded. This can occur when the
     underlying search units are not healthy. The Search service is most
     likely operational, but performance might be slow and some requests
     might be dropped. 'disabled': The Search service is disabled. In this
     state, the service will reject all API requests. 'error': The Search
     service is in an error state. If your service is in the degraded,
     disabled, or error states, it means the Azure Search team is actively
     investigating the underlying issue. Dedicated services in these states
     are still chargeable based on the number of search units provisioned.
     Possible values include: 'running', 'provisioning', 'deleting',
     'degraded', 'disabled', 'error'
    :vartype status: str or ~azure.mgmt.search.models.SearchServiceStatus
    :ivar status_details: The details of the Search service status.
    :vartype status_details: str
    :ivar provisioning_state: The state of the last provisioning operation
     performed on the Search service. Provisioning is an intermediate state
     that occurs while service capacity is being established. After capacity
     is set up, provisioningState changes to either 'succeeded' or 'failed'.
     Client applications can poll provisioning status (the recommended
     polling interval is from 30 seconds to one minute) by using the Get
     Search Service operation to see when an operation is completed. If you
     are using the free service, this value tends to come back as 'succeeded'
     directly in the call to Create Search service. This is because the free
     service uses capacity that is already set up. Possible values include:
     'succeeded', 'provisioning', 'failed'
    :vartype provisioning_state: str or
     ~azure.mgmt.search.models.ProvisioningState
    :param sku: The SKU of the Search Service, which determines price tier
     and capacity limits. This property is required when creating a new
     Search Service.
    :type sku: ~azure.mgmt.search.models.Sku
    """

    # Serialization constraints: fields marked readonly are ignored when
    # sending a request; numeric bounds mirror the documented service limits.
    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'replica_count': {'maximum': 12, 'minimum': 1},
        'partition_count': {'maximum': 12, 'minimum': 1},
        'status': {'readonly': True},
        'status_details': {'readonly': True},
        'provisioning_state': {'readonly': True},
    }

    # Maps Python attribute names to their JSON wire paths and msrest types.
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'replica_count': {'key': 'properties.replicaCount', 'type': 'int'},
        'partition_count': {'key': 'properties.partitionCount', 'type': 'int'},
        'hosting_mode': {'key': 'properties.hostingMode', 'type': 'HostingMode'},
        'status': {'key': 'properties.status', 'type': 'SearchServiceStatus'},
        'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'ProvisioningState'},
        'sku': {'key': 'sku', 'type': 'Sku'},
    }

    def __init__(self, **kwargs):
        super(SearchService, self).__init__(**kwargs)
        # Client-settable properties, with the documented service defaults.
        self.replica_count = kwargs.get('replica_count', 1)
        self.partition_count = kwargs.get('partition_count', 1)
        self.hosting_mode = kwargs.get('hosting_mode', "default")
        # Server-populated (readonly) properties start as None.
        self.status = None
        self.status_details = None
        self.provisioning_state = None
        # SKU is client-settable and required when creating a new service.
        self.sku = kwargs.get('sku', None)
396
0
27
a50e62be14975592aef74c3fa5ad879365e37cfa
1,893
py
Python
Prediction.py
sarthak25/Google-Stock-Price-Prediction
017f78b20def5ca5a3e2727051bcbc735b9dc33c
[ "MIT" ]
1
2021-07-04T12:09:35.000Z
2021-07-04T12:09:35.000Z
Prediction.py
sarthak25/Google-Stock-Price-Prediction
017f78b20def5ca5a3e2727051bcbc735b9dc33c
[ "MIT" ]
null
null
null
Prediction.py
sarthak25/Google-Stock-Price-Prediction
017f78b20def5ca5a3e2727051bcbc735b9dc33c
[ "MIT" ]
1
2020-11-16T18:31:57.000Z
2020-11-16T18:31:57.000Z
# A PREDICTIVE MODEL FOR GOOGLE STOCK PRICES USING LINEAR REGRESSION # IMPORTING ALL THE MODULES import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression # READING THE DATASET AND VISUALIZING A FEW ENTRIES dt=pd.read_csv("Dataset/Google_Stock_Price_Train.csv") print("Entries of the dataset:") print(dt.head()) # PREPARATION OF THE TRAINING DATASET df=dt.iloc[:,1:2].values # NUMPY ARRAY print("Shape of the training dataset:") print(dt.shape) x_train=[] y_train=[] for i in range(60,1258): x_train.append(df[i-60:i,0]) y_train.append(df[i,0]) x_train,y_train= np.array(x_train), np.array(y_train) print("The Converted X_train shape:") print(x_train.shape) print("The Converted Y_train shape:") print(y_train.shape) # PREPARATION OF THE TESTING DATASET df_test = pd.read_csv('Dataset/Google_Stock_Price_Test.csv') Real_stock_price = df_test.iloc[:,1:2].values print("Shape of the testing dataset:") print(Real_stock_price.shape) total_dataset = pd.concat((dt['Open'], df_test['Open']),axis = 0 ) inputs = total_dataset[len(total_dataset)-len(df_test) - 60:].values inputs = inputs.reshape(-1,1) print("Shape of the dataset after conversion:") print(inputs.shape) x_test = [] for i in range (60,80): x_test.append(inputs[i-60:i,0]) x_test=np.array(x_test) print("Converted X_test shape:") print(x_test.shape) # FITTING THE DATA IN THE MODEL lrm = LinearRegression() print(lrm.fit(x_train,y_train)) pred_lrm = lrm.predict(x_test) print("Prediction:") print(pred_lrm) # PLOTTING THE GRAPH plt.plot(Real_stock_price,color='red', label='Real Result') plt.plot(pred_lrm,color='green',label='Predicted by Model') plt.title('Google Stock Price Prediction') plt.xlabel('Time') plt.ylabel('Price') plt.legend() plt.show()
27.042857
69
0.725304
# A PREDICTIVE MODEL FOR GOOGLE STOCK PRICES USING LINEAR REGRESSION # IMPORTING ALL THE MODULES import numpy as np import pandas as pd import seaborn as sns import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression # READING THE DATASET AND VISUALIZING A FEW ENTRIES dt=pd.read_csv("Dataset/Google_Stock_Price_Train.csv") print("Entries of the dataset:") print(dt.head()) # PREPARATION OF THE TRAINING DATASET df=dt.iloc[:,1:2].values # NUMPY ARRAY print("Shape of the training dataset:") print(dt.shape) x_train=[] y_train=[] for i in range(60,1258): x_train.append(df[i-60:i,0]) y_train.append(df[i,0]) x_train,y_train= np.array(x_train), np.array(y_train) print("The Converted X_train shape:") print(x_train.shape) print("The Converted Y_train shape:") print(y_train.shape) # PREPARATION OF THE TESTING DATASET df_test = pd.read_csv('Dataset/Google_Stock_Price_Test.csv') Real_stock_price = df_test.iloc[:,1:2].values print("Shape of the testing dataset:") print(Real_stock_price.shape) total_dataset = pd.concat((dt['Open'], df_test['Open']),axis = 0 ) inputs = total_dataset[len(total_dataset)-len(df_test) - 60:].values inputs = inputs.reshape(-1,1) print("Shape of the dataset after conversion:") print(inputs.shape) x_test = [] for i in range (60,80): x_test.append(inputs[i-60:i,0]) x_test=np.array(x_test) print("Converted X_test shape:") print(x_test.shape) # FITTING THE DATA IN THE MODEL lrm = LinearRegression() print(lrm.fit(x_train,y_train)) pred_lrm = lrm.predict(x_test) print("Prediction:") print(pred_lrm) # PLOTTING THE GRAPH plt.plot(Real_stock_price,color='red', label='Real Result') plt.plot(pred_lrm,color='green',label='Predicted by Model') plt.title('Google Stock Price Prediction') plt.xlabel('Time') plt.ylabel('Price') plt.legend() plt.show()
0
0
0
f95e1333ac543570c7afc2700e11021923f0fd0f
579
py
Python
torchtools/test.py
Takezo87/torchtools
4230305d9063dabee3614f0dcd8557739b90f817
[ "Apache-2.0" ]
null
null
null
torchtools/test.py
Takezo87/torchtools
4230305d9063dabee3614f0dcd8557739b90f817
[ "Apache-2.0" ]
1
2022-02-26T06:23:52.000Z
2022-02-26T06:23:52.000Z
torchtools/test.py
Takezo87/torchtools
4230305d9063dabee3614f0dcd8557739b90f817
[ "Apache-2.0" ]
null
null
null
# AUTOGENERATED! DO NOT EDIT! File to edit: 100_test.ipynb (unless otherwise specified). __all__ = ['items_to_arrays'] # Cell from .core import * from .data import * from .models import * from .datasets import * from .augmentations import * from .datablock import * from .dataloader import * # Cell def items_to_arrays(items): '''convert list of item tuples into X,y numpy arrays (for use with numpy dataloader)''' # return np.stack([x[0] for x in items]), np.stack([x[1] for x in items]) return tuple(np.stack([x[i] for x in items]) for i in range(len(items[0])))
32.166667
91
0.704663
# AUTOGENERATED! DO NOT EDIT! File to edit: 100_test.ipynb (unless otherwise specified). __all__ = ['items_to_arrays'] # Cell from .core import * from .data import * from .models import * from .datasets import * from .augmentations import * from .datablock import * from .dataloader import * # Cell def items_to_arrays(items): '''convert list of item tuples into X,y numpy arrays (for use with numpy dataloader)''' # return np.stack([x[0] for x in items]), np.stack([x[1] for x in items]) return tuple(np.stack([x[i] for x in items]) for i in range(len(items[0])))
0
0
0
13d97300471566ef042dc75cbe53f61f814fbf4a
28,681
py
Python
sopel/tools/__init__.py
cottongin/sopel
54e42747959d86ddc1ba40bc02098cb3ab7d9439
[ "EFL-2.0" ]
null
null
null
sopel/tools/__init__.py
cottongin/sopel
54e42747959d86ddc1ba40bc02098cb3ab7d9439
[ "EFL-2.0" ]
null
null
null
sopel/tools/__init__.py
cottongin/sopel
54e42747959d86ddc1ba40bc02098cb3ab7d9439
[ "EFL-2.0" ]
null
null
null
# coding=utf-8 """Useful miscellaneous tools and shortcuts for Sopel plugins *Availability: 3+* """ # tools.py - Sopel misc tools # Copyright 2008, Sean B. Palmer, inamidst.com # Copyright © 2012, Elad Alfassa <elad@fedoraproject.org> # Copyright 2012, Elsie Powell, embolalia.com # Licensed under the Eiffel Forum License 2. # https://sopel.chat from __future__ import absolute_import, division, print_function, unicode_literals import codecs from collections import defaultdict import functools import logging import os import re import sys import threading import traceback from pkg_resources import parse_version from sopel import __version__ from sopel.tools._events import events # NOQA if sys.version_info.major >= 3: raw_input = input unicode = str iteritems = dict.items itervalues = dict.values iterkeys = dict.keys else: iteritems = dict.iteritems itervalues = dict.itervalues iterkeys = dict.iterkeys _channel_prefixes = ('#', '&', '+', '!') # Can be implementation-dependent _regex_type = type(re.compile('')) def deprecated( reason=None, version=None, removed_in=None, warning_in=None, func=None, ): """Decorator to mark deprecated functions in Sopel's API :param str reason: optional text added to the deprecation warning :param str version: optional version number when the decorated function is deprecated :param str removed_in: optional version number when the deprecated function will be removed :param str warning_in: optional version number when the decorated function should start emitting a warning when called :param callable func: deprecated function :return: a callable that depends on how the decorator is called; either the decorated function, or a decorator with the appropriate parameters Any time the decorated ``func`` is called, a deprecation warning will be printed to ``sys.stderr``, with the last frame of the traceback. The optional ``warning_in`` argument suppresses the warning on Sopel versions older than that, allowing for multi-stage deprecation timelines. 
The decorator can be used with or without arguments:: from sopel import tools @tools.deprecated def func1(): print('func 1') @tools.deprecated() def func2(): print('func 2') @tools.deprecated(reason='obsolete', version='7.0', removed_in='8.0') def func3(): print('func 3') which will output the following in a console:: >>> func1() Deprecated: func1 File "<stdin>", line 1, in <module> func 1 >>> func2() Deprecated: func2 File "<stdin>", line 1, in <module> func 2 >>> func3() Deprecated since 7.0, will be removed in 8.0: obsolete File "<stdin>", line 1, in <module> func 3 .. note:: There is nothing that prevents this decorator to be used on a class's method, or on any existing callable. .. versionadded:: 7.0 Parameters ``reason``, ``version``, and ``removed_in``. .. versionadded:: 7.1 The ``warning_in`` parameter. """ if not any([reason, version, removed_in, warning_in, func]): # common usage: @deprecated() return deprecated if callable(reason): # common usage: @deprecated return deprecated(func=reason) if func is None: # common usage: @deprecated(message, version, removed_in) return decorator # now, we have everything we need to have: # - message is not a callable (could be None) # - func is not None # - version and removed_in can be None but that's OK # so now we can return the actual decorated function message = reason or getattr(func, '__name__', '<anonymous-function>') template = 'Deprecated: {message}' if version and removed_in: template = ( 'Deprecated since {version}, ' 'will be removed in {removed_in}: ' '{message}') elif version: template = 'Deprecated since {version}: {message}' elif removed_in: template = 'Deprecated, will be removed in {removed_in}: {message}' text = template.format( message=message, version=version, removed_in=removed_in) @functools.wraps(func) return deprecated_func @deprecated('Shim for Python 2 cross-compatibility, no longer needed. 
' 'Use built-in `input()` instead.', version='7.1', warning_in='8.0', removed_in='8.1') def get_input(prompt): """Get decoded input from the terminal (equivalent to Python 3's ``input``). :param str prompt: what to display as a prompt on the terminal :return: the user's input :rtype: str .. deprecated:: 7.1 Use of this function will become a warning when Python 2 support is dropped in Sopel 8.0. The function will be removed in Sopel 8.1. """ if sys.version_info.major >= 3: return input(prompt) else: return raw_input(prompt).decode('utf8') def compile_rule(nick, pattern, alias_nicks): """Compile a rule regex and fill in nickname placeholders. :param str nick: the nickname to use when replacing ``$nick`` and ``$nickname`` placeholders in the ``pattern`` :param str pattern: the rule regex pattern :param list alias_nicks: a list of alternatives that should also be accepted instead of ``nick`` :return: the compiled regex ``pattern``, with placeholders for ``$nick`` and ``$nickname`` filled in :rtype: :ref:`re.Pattern <python:re-objects>` Will not recompile an already compiled pattern. """ # Not sure why this happens on reloads, but it shouldn't cause problems… if isinstance(pattern, _regex_type): return pattern if alias_nicks: nicks = list(alias_nicks) # alias_nicks.copy() doesn't work in py2 nicks.append(nick) nicks = map(re.escape, nicks) nick = '(?:%s)' % '|'.join(nicks) else: nick = re.escape(nick) pattern = pattern.replace('$nickname', nick) pattern = pattern.replace('$nick ', r'{}[,:]\s*'.format(nick)) # @rule('$nick hi') pattern = pattern.replace('$nick', r'{}[,:]\s+'.format(nick)) # @rule('$nickhi') flags = re.IGNORECASE if '\n' in pattern: # See https://docs.python.org/3/library/re.html#re.VERBOSE flags |= re.VERBOSE return re.compile(pattern, flags) def get_command_regexp(prefix, command): """Get a compiled regexp object that implements the command. 
:param str prefix: the command prefix (interpreted as regex) :param str command: the name of the command :return: a compiled regexp object that implements the command :rtype: :ref:`re.Pattern <python:re-objects>` """ # Escape all whitespace with a single backslash. This ensures that regexp # in the prefix is treated as it was before the actual regexp was changed # to use the verbose syntax. prefix = re.sub(r"(\s)", r"\\\1", prefix) pattern = get_command_pattern(prefix, command) return re.compile(pattern, re.IGNORECASE | re.VERBOSE) def get_command_pattern(prefix, command): """Get the uncompiled regex pattern for standard commands. :param str prefix: the command prefix (interpreted as regex) :param str command: the command name :return: a regex pattern that will match the given command :rtype: str """ # This regexp matches equivalently and produces the same # groups 1 and 2 as the old regexp: r'^%s(%s)(?: +(.*))?$' # The only differences should be handling all whitespace # like spaces and the addition of groups 3-6. return r""" (?:{prefix})({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. (?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. # Leave it up to the plugin to parse # the line. ))? # Group 2 must be None, if there are no # parameters. $ # EoL, so there are no partial matches. """.format(prefix=prefix, command=command) def get_nickname_command_regexp(nick, command, alias_nicks): """Get a compiled regexp object that implements the nickname command. 
:param str nick: the bot's nickname :param str command: the command name :param list alias_nicks: a list of alternatives that should also be accepted instead of ``nick`` :return: a compiled regex pattern that implements the given nickname command :rtype: :ref:`re.Pattern <python:re-objects>` """ if isinstance(alias_nicks, unicode): alias_nicks = [alias_nicks] elif not isinstance(alias_nicks, (list, tuple)): raise ValueError('A list or string is required.') return compile_rule(nick, get_nickname_command_pattern(command), alias_nicks) def get_nickname_command_pattern(command): """Get the uncompiled regex pattern for a nickname command. :param str command: the command name :return: a regex pattern that will match the given nickname command :rtype: str """ return r""" ^ $nickname[:,]? # Nickname. \s+({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. (?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. Leave it up to # the plugin to parse the line. ))? # Group 1 must be None, if there are no parameters. $ # EoL, so there are no partial matches. """.format(command=command) def get_action_command_regexp(command): """Get a compiled regexp object that implements the command. :param str command: the name of the command :return: a compiled regexp object that implements the command :rtype: :ref:`re.Pattern <python:re-objects>` """ pattern = get_action_command_pattern(command) return re.compile(pattern, re.IGNORECASE | re.VERBOSE) def get_action_command_pattern(command): """Get the uncompiled regex pattern for action commands. :param str command: the command name :return: a regex pattern that will match the given command :rtype: str """ # This regexp matches equivalently and produces the same # groups 1 and 2 as the old regexp: r'^%s(%s)(?: +(.*))?$' # The only differences should be handling all whitespace # like spaces and the addition of groups 3-6. 
return r""" ({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. (?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. # Leave it up to the plugin to parse # the line. ))? # Group 2 must be None, if there are no # parameters. $ # EoL, so there are no partial matches. """.format(command=command) def get_sendable_message(text, max_length=400): """Get a sendable ``text`` message, with its excess when needed. :param str txt: text to send (expects Unicode-encoded string) :param int max_length: maximum length of the message to be sendable :return: a tuple of two values, the sendable text and its excess text :rtype: (str, str) We're arbitrarily saying that the max is 400 bytes of text when messages will be split. Otherwise, we'd have to account for the bot's hostmask, which is hard. The ``max_length`` is the max length of text in **bytes**, but we take care of Unicode 2-byte characters by working on the Unicode string, then making sure the bytes version is smaller than the max length. """ unicode_max_length = max_length excess = '' while len(text.encode('utf-8')) > max_length: last_space = text.rfind(' ', 0, unicode_max_length) if last_space == -1: # No last space, just split where it is possible excess = text[unicode_max_length:] + excess text = text[:unicode_max_length] # Decrease max length for the unicode string unicode_max_length = unicode_max_length - 1 else: # Split at the last best space found excess = text[last_space:] text = text[:last_space] return text, excess.lstrip() # This class was useful before Python 2.5, when ``defaultdict`` was added # to the built-in ``collections`` module. # It is now deprecated. class Ddict(dict): """A default dict implementation available for Python 2.x support. It was used to make multi-dimensional ``dict``\\s easy to use when the bot worked with Python version < 2.5. .. 
deprecated:: 7.0 Use :class:`collections.defaultdict` instead. """ @deprecated('use "collections.defaultdict" instead', '7.0', '8.0') class Identifier(unicode): """A `unicode` subclass which acts appropriately for IRC identifiers. When used as normal `unicode` objects, case will be preserved. However, when comparing two Identifier objects, or comparing a Identifier object with a `unicode` object, the comparison will be case insensitive. This case insensitivity includes the case convention conventions regarding ``[]``, ``{}``, ``|``, ``\\``, ``^`` and ``~`` described in RFC 2812. """ # May want to tweak this and update documentation accordingly when dropping # Python 2 support, since in py3 plain str is Unicode and a "unicode" type # no longer exists. Probably lots of code will need tweaking, tbh. def lower(self): """Get the RFC 2812-compliant lowercase version of this identifier. :return: RFC 2812-compliant lowercase version of the :py:class:`Identifier` instance :rtype: str """ return self._lowered @staticmethod def _lower(identifier): """Convert an identifier to lowercase per RFC 2812. :param str identifier: the identifier (nickname or channel) to convert :return: RFC 2812-compliant lowercase version of ``identifier`` :rtype: str """ if isinstance(identifier, Identifier): return identifier._lowered # The tilde replacement isn't needed for identifiers, but is for # channels, which may be useful at some point in the future. low = identifier.lower().replace('[', '{').replace(']', '}') low = low.replace('\\', '|').replace('~', '^') return low @staticmethod def _lower_swapped(identifier): """Backward-compatible version of :meth:`_lower`. :param str identifier: the identifier (nickname or channel) to convert :return: RFC 2812-non-compliant lowercase version of ``identifier`` :rtype: str This is what the old :meth:`_lower` function did before Sopel 7.0. It maps ``{}``, ``[]``, ``|``, ``\\``, ``^``, and ``~`` incorrectly. 
You shouldn't use this unless you need to migrate stored values from the previous, incorrect "lowercase" representation to the correct one. """ # The tilde replacement isn't needed for identifiers, but is for # channels, which may be useful at some point in the future. low = identifier.lower().replace('{', '[').replace('}', ']') low = low.replace('|', '\\').replace('^', '~') return low def is_nick(self): """Check if the Identifier is a nickname (i.e. not a channel) :return: ``True`` if this :py:class:`Identifier` is a nickname; ``False`` if it appears to be a channel """ return self and not self.startswith(_channel_prefixes) class OutputRedirect(object): """Redirect the output to the terminal and a log file. A simplified object used to write to both the terminal and a log file. """ def __init__(self, logpath, stderr=False, quiet=False): """Create an object which will log to both a file and the terminal. :param str logpath: path to the log file :param bool stderr: write output to stderr if ``True``, or to stdout otherwise :param bool quiet: write to the log file only if ``True`` (and not to the terminal) Create an object which will log to the file at ``logpath`` as well as the terminal. """ self.logpath = logpath self.stderr = stderr self.quiet = quiet def write(self, string): """Write the given ``string`` to the logfile and terminal. 
:param str string: the string to write """ if not self.quiet: try: if self.stderr: sys.__stderr__.write(string) else: sys.__stdout__.write(string) except Exception: # TODO: Be specific pass with codecs.open(self.logpath, 'ab', encoding="utf8", errors='xmlcharrefreplace') as logfile: try: logfile.write(string) except UnicodeDecodeError: # we got an invalid string, safely encode it to utf-8 logfile.write(unicode(string, 'utf8', errors="replace")) def flush(self): """Flush the file writing buffer.""" if self.stderr: sys.__stderr__.flush() else: sys.__stdout__.flush() # These seems to trace back to when we thought we needed a try/except on prints, # because it looked like that was why we were having problems. # We'll drop it in Sopel 8.0 because it has been here for far too long already. @deprecated('Use `print()` instead of sopel.tools.stdout', removed_in='8.0') def stderr(string): """Print the given ``string`` to stderr. :param str string: the string to output This is equivalent to ``print >> sys.stderr, string`` """ print(string, file=sys.stderr) def check_pid(pid): """Check if a process is running with the given ``PID``. :param int pid: PID to check :return bool: ``True`` if the given PID is running, ``False`` otherwise *Availability: POSIX systems only.* .. note:: Matching the :py:func:`os.kill` behavior this function needs on Windows was rejected in `Python issue #14480 <https://bugs.python.org/issue14480>`_, so :py:func:`check_pid` cannot be used on Windows systems. """ try: os.kill(pid, 0) except OSError: return False else: return True def get_hostmask_regex(mask): """Get a compiled regex pattern for an IRC hostmask :param str mask: the hostmask that the pattern should match :return: a compiled regex pattern matching the given ``mask`` :rtype: :ref:`re.Pattern <python:re-objects>` """ mask = re.escape(mask) mask = mask.replace(r'\*', '.*') return re.compile(mask + '$', re.I) def get_logger(plugin_name): """Return a logger for a plugin. 
:param str plugin_name: name of the plugin :return: the logger for the given plugin This:: from sopel import plugins LOGGER = plugins.get_logger('my_custom_plugin') is equivalent to this:: import logging LOGGER = logging.getLogger('sopel.externals.my_custom_plugin') Internally, Sopel configures logging for the ``sopel`` namespace, so external plugins can't benefit from it with ``logging.getLogger(__name__)`` as they won't be in the same namespace. This function uses the ``plugin_name`` with a prefix inside this namespace. """ return logging.getLogger('sopel.externals.%s' % plugin_name) class SopelMemory(dict): """A simple thread-safe ``dict`` implementation. In order to prevent exceptions when iterating over the values and changing them at the same time from different threads, we use a blocking lock in ``__setitem__`` and ``contains``. .. versionadded:: 3.1 As ``Willie.WillieMemory`` .. versionchanged:: 4.0 Moved to ``tools.WillieMemory`` .. versionchanged:: 6.0 Renamed from ``WillieMemory`` to ``SopelMemory`` """ def __setitem__(self, key, value): """Set a key equal to a value. The dict is locked for other writes while doing so. """ self.lock.acquire() result = dict.__setitem__(self, key, value) self.lock.release() return result def __contains__(self, key): """Check if a key is in the dict. The dict is locked for writes while doing so. """ self.lock.acquire() result = dict.__contains__(self, key) self.lock.release() return result # Needed to make it explicit that we don't care about the `lock` attribute # when comparing/hashing SopelMemory objects. __eq__ = dict.__eq__ __ne__ = dict.__ne__ __hash__ = dict.__hash__ @deprecated def contains(self, key): """Check if ``key`` is in the memory :param str key: key to check for .. deprecated:: 7.0 Will be removed in Sopel 8. If you aren't already using the ``in`` operator, you should be. 
""" return self.__contains__(key) class SopelMemoryWithDefault(defaultdict): """Same as SopelMemory, but subclasses from collections.defaultdict. .. versionadded:: 4.3 As ``WillieMemoryWithDefault`` .. versionchanged:: 6.0 Renamed to ``SopelMemoryWithDefault`` """ def __setitem__(self, key, value): """Set a key equal to a value. The dict is locked for other writes while doing so. """ self.lock.acquire() result = defaultdict.__setitem__(self, key, value) self.lock.release() return result def __contains__(self, key): """Check if a key is in the dict. The dict is locked for writes while doing so. """ self.lock.acquire() result = defaultdict.__contains__(self, key) self.lock.release() return result @deprecated def contains(self, key): """Check if ``key`` is in the memory :param str key: key to check for .. deprecated:: 7.0 Will be removed in Sopel 8. If you aren't already using the ``in`` operator, you should be. """ return self.__contains__(key) class SopelIdentifierMemory(SopelMemory): """Special Sopel memory that stores ``Identifier`` as key. This is a convenient subclass of :class:`SopelMemory` that always casts its keys as instances of :class:`Identifier`:: >>> from sopel import tools >>> memory = tools.SopelIdentifierMemory() >>> memory['Exirel'] = 'king' >>> list(memory.items()) [(Identifier('Exirel'), 'king')] >>> tools.Identifier('exirel') in memory True >>> 'exirel' in memory True As seen in the example above, it is possible to perform various operations with both ``Identifier`` and :class:`str` objects, taking advantage of the case-insensitive behavior of ``Identifier``. .. note:: Internally, it will try to do ``key = tools.Identifier(key)``, which will raise an exception if it cannot instantiate the key properly:: >>> memory[1] = 'error' AttributeError: 'int' object has no attribute 'lower' .. 
versionadded:: 7.1 """ @deprecated(version='7.0', removed_in='8.0') def get_raising_file_and_line(tb=None): """Get the file and line number where an exception happened. :param tb: the traceback (uses the most recent exception if not given) :return: a tuple of the filename and line number :rtype: (str, int) .. deprecated:: 7.0 Use Python's built-in logging system, with the ``logger.exception`` method. This method makes sure to log the exception with the traceback and the relevant information (filename, line number, etc.). """ if not tb: tb = sys.exc_info()[2] filename, lineno, _context, _line = traceback.extract_tb(tb)[-1] return filename, lineno def chain_loaders(*lazy_loaders): """Chain lazy loaders into one. :param lazy_loaders: one or more lazy loader functions :type lazy_loaders: :term:`function` :return: a lazy loader that combines all of the given ones :rtype: :term:`function` This function takes any number of lazy loaders as arguments and merges them together into one. It's primarily a helper for lazy rule decorators such as :func:`sopel.plugin.url_lazy`. .. important:: This function doesn't check the uniqueness of regexes generated by all the loaders. """ return chained_loader
34.103448
87
0.62055
# coding=utf-8 """Useful miscellaneous tools and shortcuts for Sopel plugins *Availability: 3+* """ # tools.py - Sopel misc tools # Copyright 2008, Sean B. Palmer, inamidst.com # Copyright © 2012, Elad Alfassa <elad@fedoraproject.org> # Copyright 2012, Elsie Powell, embolalia.com # Licensed under the Eiffel Forum License 2. # https://sopel.chat from __future__ import absolute_import, division, print_function, unicode_literals import codecs from collections import defaultdict import functools import logging import os import re import sys import threading import traceback from pkg_resources import parse_version from sopel import __version__ from sopel.tools._events import events # NOQA if sys.version_info.major >= 3: raw_input = input unicode = str iteritems = dict.items itervalues = dict.values iterkeys = dict.keys else: iteritems = dict.iteritems itervalues = dict.itervalues iterkeys = dict.iterkeys _channel_prefixes = ('#', '&', '+', '!') # Can be implementation-dependent _regex_type = type(re.compile('')) def deprecated( reason=None, version=None, removed_in=None, warning_in=None, func=None, ): """Decorator to mark deprecated functions in Sopel's API :param str reason: optional text added to the deprecation warning :param str version: optional version number when the decorated function is deprecated :param str removed_in: optional version number when the deprecated function will be removed :param str warning_in: optional version number when the decorated function should start emitting a warning when called :param callable func: deprecated function :return: a callable that depends on how the decorator is called; either the decorated function, or a decorator with the appropriate parameters Any time the decorated ``func`` is called, a deprecation warning will be printed to ``sys.stderr``, with the last frame of the traceback. The optional ``warning_in`` argument suppresses the warning on Sopel versions older than that, allowing for multi-stage deprecation timelines. 
The decorator can be used with or without arguments:: from sopel import tools @tools.deprecated def func1(): print('func 1') @tools.deprecated() def func2(): print('func 2') @tools.deprecated(reason='obsolete', version='7.0', removed_in='8.0') def func3(): print('func 3') which will output the following in a console:: >>> func1() Deprecated: func1 File "<stdin>", line 1, in <module> func 1 >>> func2() Deprecated: func2 File "<stdin>", line 1, in <module> func 2 >>> func3() Deprecated since 7.0, will be removed in 8.0: obsolete File "<stdin>", line 1, in <module> func 3 .. note:: There is nothing that prevents this decorator to be used on a class's method, or on any existing callable. .. versionadded:: 7.0 Parameters ``reason``, ``version``, and ``removed_in``. .. versionadded:: 7.1 The ``warning_in`` parameter. """ if not any([reason, version, removed_in, warning_in, func]): # common usage: @deprecated() return deprecated if callable(reason): # common usage: @deprecated return deprecated(func=reason) if func is None: # common usage: @deprecated(message, version, removed_in) def decorator(func): return deprecated(reason, version, removed_in, warning_in, func) return decorator # now, we have everything we need to have: # - message is not a callable (could be None) # - func is not None # - version and removed_in can be None but that's OK # so now we can return the actual decorated function message = reason or getattr(func, '__name__', '<anonymous-function>') template = 'Deprecated: {message}' if version and removed_in: template = ( 'Deprecated since {version}, ' 'will be removed in {removed_in}: ' '{message}') elif version: template = 'Deprecated since {version}: {message}' elif removed_in: template = 'Deprecated, will be removed in {removed_in}: {message}' text = template.format( message=message, version=version, removed_in=removed_in) @functools.wraps(func) def deprecated_func(*args, **kwargs): if not (warning_in and parse_version(warning_in) >= 
parse_version(__version__)): stderr(text) # Only display the last frame trace = traceback.extract_stack() stderr(traceback.format_list(trace[:-1])[-1][:-1]) return func(*args, **kwargs) return deprecated_func @deprecated('Shim for Python 2 cross-compatibility, no longer needed. ' 'Use built-in `input()` instead.', version='7.1', warning_in='8.0', removed_in='8.1') def get_input(prompt): """Get decoded input from the terminal (equivalent to Python 3's ``input``). :param str prompt: what to display as a prompt on the terminal :return: the user's input :rtype: str .. deprecated:: 7.1 Use of this function will become a warning when Python 2 support is dropped in Sopel 8.0. The function will be removed in Sopel 8.1. """ if sys.version_info.major >= 3: return input(prompt) else: return raw_input(prompt).decode('utf8') def compile_rule(nick, pattern, alias_nicks): """Compile a rule regex and fill in nickname placeholders. :param str nick: the nickname to use when replacing ``$nick`` and ``$nickname`` placeholders in the ``pattern`` :param str pattern: the rule regex pattern :param list alias_nicks: a list of alternatives that should also be accepted instead of ``nick`` :return: the compiled regex ``pattern``, with placeholders for ``$nick`` and ``$nickname`` filled in :rtype: :ref:`re.Pattern <python:re-objects>` Will not recompile an already compiled pattern. 
""" # Not sure why this happens on reloads, but it shouldn't cause problems… if isinstance(pattern, _regex_type): return pattern if alias_nicks: nicks = list(alias_nicks) # alias_nicks.copy() doesn't work in py2 nicks.append(nick) nicks = map(re.escape, nicks) nick = '(?:%s)' % '|'.join(nicks) else: nick = re.escape(nick) pattern = pattern.replace('$nickname', nick) pattern = pattern.replace('$nick ', r'{}[,:]\s*'.format(nick)) # @rule('$nick hi') pattern = pattern.replace('$nick', r'{}[,:]\s+'.format(nick)) # @rule('$nickhi') flags = re.IGNORECASE if '\n' in pattern: # See https://docs.python.org/3/library/re.html#re.VERBOSE flags |= re.VERBOSE return re.compile(pattern, flags) def get_command_regexp(prefix, command): """Get a compiled regexp object that implements the command. :param str prefix: the command prefix (interpreted as regex) :param str command: the name of the command :return: a compiled regexp object that implements the command :rtype: :ref:`re.Pattern <python:re-objects>` """ # Escape all whitespace with a single backslash. This ensures that regexp # in the prefix is treated as it was before the actual regexp was changed # to use the verbose syntax. prefix = re.sub(r"(\s)", r"\\\1", prefix) pattern = get_command_pattern(prefix, command) return re.compile(pattern, re.IGNORECASE | re.VERBOSE) def get_command_pattern(prefix, command): """Get the uncompiled regex pattern for standard commands. :param str prefix: the command prefix (interpreted as regex) :param str command: the command name :return: a regex pattern that will match the given command :rtype: str """ # This regexp matches equivalently and produces the same # groups 1 and 2 as the old regexp: r'^%s(%s)(?: +(.*))?$' # The only differences should be handling all whitespace # like spaces and the addition of groups 3-6. return r""" (?:{prefix})({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. 
(?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. # Leave it up to the plugin to parse # the line. ))? # Group 2 must be None, if there are no # parameters. $ # EoL, so there are no partial matches. """.format(prefix=prefix, command=command) def get_nickname_command_regexp(nick, command, alias_nicks): """Get a compiled regexp object that implements the nickname command. :param str nick: the bot's nickname :param str command: the command name :param list alias_nicks: a list of alternatives that should also be accepted instead of ``nick`` :return: a compiled regex pattern that implements the given nickname command :rtype: :ref:`re.Pattern <python:re-objects>` """ if isinstance(alias_nicks, unicode): alias_nicks = [alias_nicks] elif not isinstance(alias_nicks, (list, tuple)): raise ValueError('A list or string is required.') return compile_rule(nick, get_nickname_command_pattern(command), alias_nicks) def get_nickname_command_pattern(command): """Get the uncompiled regex pattern for a nickname command. :param str command: the command name :return: a regex pattern that will match the given nickname command :rtype: str """ return r""" ^ $nickname[:,]? # Nickname. \s+({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. (?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. Leave it up to # the plugin to parse the line. ))? # Group 1 must be None, if there are no parameters. $ # EoL, so there are no partial matches. """.format(command=command) def get_action_command_regexp(command): """Get a compiled regexp object that implements the command. 
:param str command: the name of the command :return: a compiled regexp object that implements the command :rtype: :ref:`re.Pattern <python:re-objects>` """ pattern = get_action_command_pattern(command) return re.compile(pattern, re.IGNORECASE | re.VERBOSE) def get_action_command_pattern(command): """Get the uncompiled regex pattern for action commands. :param str command: the command name :return: a regex pattern that will match the given command :rtype: str """ # This regexp matches equivalently and produces the same # groups 1 and 2 as the old regexp: r'^%s(%s)(?: +(.*))?$' # The only differences should be handling all whitespace # like spaces and the addition of groups 3-6. return r""" ({command}) # Command as group 1. (?:\s+ # Whitespace to end command. ( # Rest of the line as group 2. (?:(\S+))? # Parameters 1-4 as groups 3-6. (?:\s+(\S+))? (?:\s+(\S+))? (?:\s+(\S+))? .* # Accept anything after the parameters. # Leave it up to the plugin to parse # the line. ))? # Group 2 must be None, if there are no # parameters. $ # EoL, so there are no partial matches. """.format(command=command) def get_sendable_message(text, max_length=400): """Get a sendable ``text`` message, with its excess when needed. :param str txt: text to send (expects Unicode-encoded string) :param int max_length: maximum length of the message to be sendable :return: a tuple of two values, the sendable text and its excess text :rtype: (str, str) We're arbitrarily saying that the max is 400 bytes of text when messages will be split. Otherwise, we'd have to account for the bot's hostmask, which is hard. The ``max_length`` is the max length of text in **bytes**, but we take care of Unicode 2-byte characters by working on the Unicode string, then making sure the bytes version is smaller than the max length. 
""" unicode_max_length = max_length excess = '' while len(text.encode('utf-8')) > max_length: last_space = text.rfind(' ', 0, unicode_max_length) if last_space == -1: # No last space, just split where it is possible excess = text[unicode_max_length:] + excess text = text[:unicode_max_length] # Decrease max length for the unicode string unicode_max_length = unicode_max_length - 1 else: # Split at the last best space found excess = text[last_space:] text = text[:last_space] return text, excess.lstrip() # This class was useful before Python 2.5, when ``defaultdict`` was added # to the built-in ``collections`` module. # It is now deprecated. class Ddict(dict): """A default dict implementation available for Python 2.x support. It was used to make multi-dimensional ``dict``\\s easy to use when the bot worked with Python version < 2.5. .. deprecated:: 7.0 Use :class:`collections.defaultdict` instead. """ @deprecated('use "collections.defaultdict" instead', '7.0', '8.0') def __init__(self, default=None): self.default = default def __getitem__(self, key): if key not in self: self[key] = self.default() return dict.__getitem__(self, key) class Identifier(unicode): """A `unicode` subclass which acts appropriately for IRC identifiers. When used as normal `unicode` objects, case will be preserved. However, when comparing two Identifier objects, or comparing a Identifier object with a `unicode` object, the comparison will be case insensitive. This case insensitivity includes the case convention conventions regarding ``[]``, ``{}``, ``|``, ``\\``, ``^`` and ``~`` described in RFC 2812. """ # May want to tweak this and update documentation accordingly when dropping # Python 2 support, since in py3 plain str is Unicode and a "unicode" type # no longer exists. Probably lots of code will need tweaking, tbh. def __new__(cls, identifier): # According to RFC2812, identifiers have to be in the ASCII range. 
# However, I think it's best to let the IRCd determine that, and we'll # just assume unicode. It won't hurt anything, and is more internally # consistent. And who knows, maybe there's another use case for this # weird case convention. s = unicode.__new__(cls, identifier) s._lowered = Identifier._lower(identifier) return s def lower(self): """Get the RFC 2812-compliant lowercase version of this identifier. :return: RFC 2812-compliant lowercase version of the :py:class:`Identifier` instance :rtype: str """ return self._lowered @staticmethod def _lower(identifier): """Convert an identifier to lowercase per RFC 2812. :param str identifier: the identifier (nickname or channel) to convert :return: RFC 2812-compliant lowercase version of ``identifier`` :rtype: str """ if isinstance(identifier, Identifier): return identifier._lowered # The tilde replacement isn't needed for identifiers, but is for # channels, which may be useful at some point in the future. low = identifier.lower().replace('[', '{').replace(']', '}') low = low.replace('\\', '|').replace('~', '^') return low @staticmethod def _lower_swapped(identifier): """Backward-compatible version of :meth:`_lower`. :param str identifier: the identifier (nickname or channel) to convert :return: RFC 2812-non-compliant lowercase version of ``identifier`` :rtype: str This is what the old :meth:`_lower` function did before Sopel 7.0. It maps ``{}``, ``[]``, ``|``, ``\\``, ``^``, and ``~`` incorrectly. You shouldn't use this unless you need to migrate stored values from the previous, incorrect "lowercase" representation to the correct one. """ # The tilde replacement isn't needed for identifiers, but is for # channels, which may be useful at some point in the future. 
low = identifier.lower().replace('{', '[').replace('}', ']') low = low.replace('|', '\\').replace('^', '~') return low def __repr__(self): return "%s(%r)" % ( self.__class__.__name__, self.__str__() ) def __hash__(self): return self._lowered.__hash__() def __lt__(self, other): if isinstance(other, unicode): other = Identifier._lower(other) return unicode.__lt__(self._lowered, other) def __le__(self, other): if isinstance(other, unicode): other = Identifier._lower(other) return unicode.__le__(self._lowered, other) def __gt__(self, other): if isinstance(other, unicode): other = Identifier._lower(other) return unicode.__gt__(self._lowered, other) def __ge__(self, other): if isinstance(other, unicode): other = Identifier._lower(other) return unicode.__ge__(self._lowered, other) def __eq__(self, other): if isinstance(other, unicode): other = Identifier._lower(other) return unicode.__eq__(self._lowered, other) def __ne__(self, other): return not (self == other) def is_nick(self): """Check if the Identifier is a nickname (i.e. not a channel) :return: ``True`` if this :py:class:`Identifier` is a nickname; ``False`` if it appears to be a channel """ return self and not self.startswith(_channel_prefixes) class OutputRedirect(object): """Redirect the output to the terminal and a log file. A simplified object used to write to both the terminal and a log file. """ def __init__(self, logpath, stderr=False, quiet=False): """Create an object which will log to both a file and the terminal. :param str logpath: path to the log file :param bool stderr: write output to stderr if ``True``, or to stdout otherwise :param bool quiet: write to the log file only if ``True`` (and not to the terminal) Create an object which will log to the file at ``logpath`` as well as the terminal. """ self.logpath = logpath self.stderr = stderr self.quiet = quiet def write(self, string): """Write the given ``string`` to the logfile and terminal. 
:param str string: the string to write """ if not self.quiet: try: if self.stderr: sys.__stderr__.write(string) else: sys.__stdout__.write(string) except Exception: # TODO: Be specific pass with codecs.open(self.logpath, 'ab', encoding="utf8", errors='xmlcharrefreplace') as logfile: try: logfile.write(string) except UnicodeDecodeError: # we got an invalid string, safely encode it to utf-8 logfile.write(unicode(string, 'utf8', errors="replace")) def flush(self): """Flush the file writing buffer.""" if self.stderr: sys.__stderr__.flush() else: sys.__stdout__.flush() # These seems to trace back to when we thought we needed a try/except on prints, # because it looked like that was why we were having problems. # We'll drop it in Sopel 8.0 because it has been here for far too long already. @deprecated('Use `print()` instead of sopel.tools.stdout', removed_in='8.0') def stdout(string): print(string) def stderr(string): """Print the given ``string`` to stderr. :param str string: the string to output This is equivalent to ``print >> sys.stderr, string`` """ print(string, file=sys.stderr) def check_pid(pid): """Check if a process is running with the given ``PID``. :param int pid: PID to check :return bool: ``True`` if the given PID is running, ``False`` otherwise *Availability: POSIX systems only.* .. note:: Matching the :py:func:`os.kill` behavior this function needs on Windows was rejected in `Python issue #14480 <https://bugs.python.org/issue14480>`_, so :py:func:`check_pid` cannot be used on Windows systems. 
""" try: os.kill(pid, 0) except OSError: return False else: return True def get_hostmask_regex(mask): """Get a compiled regex pattern for an IRC hostmask :param str mask: the hostmask that the pattern should match :return: a compiled regex pattern matching the given ``mask`` :rtype: :ref:`re.Pattern <python:re-objects>` """ mask = re.escape(mask) mask = mask.replace(r'\*', '.*') return re.compile(mask + '$', re.I) def get_logger(plugin_name): """Return a logger for a plugin. :param str plugin_name: name of the plugin :return: the logger for the given plugin This:: from sopel import plugins LOGGER = plugins.get_logger('my_custom_plugin') is equivalent to this:: import logging LOGGER = logging.getLogger('sopel.externals.my_custom_plugin') Internally, Sopel configures logging for the ``sopel`` namespace, so external plugins can't benefit from it with ``logging.getLogger(__name__)`` as they won't be in the same namespace. This function uses the ``plugin_name`` with a prefix inside this namespace. """ return logging.getLogger('sopel.externals.%s' % plugin_name) class SopelMemory(dict): """A simple thread-safe ``dict`` implementation. In order to prevent exceptions when iterating over the values and changing them at the same time from different threads, we use a blocking lock in ``__setitem__`` and ``contains``. .. versionadded:: 3.1 As ``Willie.WillieMemory`` .. versionchanged:: 4.0 Moved to ``tools.WillieMemory`` .. versionchanged:: 6.0 Renamed from ``WillieMemory`` to ``SopelMemory`` """ def __init__(self, *args): dict.__init__(self, *args) self.lock = threading.Lock() def __setitem__(self, key, value): """Set a key equal to a value. The dict is locked for other writes while doing so. """ self.lock.acquire() result = dict.__setitem__(self, key, value) self.lock.release() return result def __contains__(self, key): """Check if a key is in the dict. The dict is locked for writes while doing so. 
""" self.lock.acquire() result = dict.__contains__(self, key) self.lock.release() return result # Needed to make it explicit that we don't care about the `lock` attribute # when comparing/hashing SopelMemory objects. __eq__ = dict.__eq__ __ne__ = dict.__ne__ __hash__ = dict.__hash__ @deprecated def contains(self, key): """Check if ``key`` is in the memory :param str key: key to check for .. deprecated:: 7.0 Will be removed in Sopel 8. If you aren't already using the ``in`` operator, you should be. """ return self.__contains__(key) class SopelMemoryWithDefault(defaultdict): """Same as SopelMemory, but subclasses from collections.defaultdict. .. versionadded:: 4.3 As ``WillieMemoryWithDefault`` .. versionchanged:: 6.0 Renamed to ``SopelMemoryWithDefault`` """ def __init__(self, *args): defaultdict.__init__(self, *args) self.lock = threading.Lock() def __setitem__(self, key, value): """Set a key equal to a value. The dict is locked for other writes while doing so. """ self.lock.acquire() result = defaultdict.__setitem__(self, key, value) self.lock.release() return result def __contains__(self, key): """Check if a key is in the dict. The dict is locked for writes while doing so. """ self.lock.acquire() result = defaultdict.__contains__(self, key) self.lock.release() return result @deprecated def contains(self, key): """Check if ``key`` is in the memory :param str key: key to check for .. deprecated:: 7.0 Will be removed in Sopel 8. If you aren't already using the ``in`` operator, you should be. """ return self.__contains__(key) class SopelIdentifierMemory(SopelMemory): """Special Sopel memory that stores ``Identifier`` as key. 
This is a convenient subclass of :class:`SopelMemory` that always casts its keys as instances of :class:`Identifier`:: >>> from sopel import tools >>> memory = tools.SopelIdentifierMemory() >>> memory['Exirel'] = 'king' >>> list(memory.items()) [(Identifier('Exirel'), 'king')] >>> tools.Identifier('exirel') in memory True >>> 'exirel' in memory True As seen in the example above, it is possible to perform various operations with both ``Identifier`` and :class:`str` objects, taking advantage of the case-insensitive behavior of ``Identifier``. .. note:: Internally, it will try to do ``key = tools.Identifier(key)``, which will raise an exception if it cannot instantiate the key properly:: >>> memory[1] = 'error' AttributeError: 'int' object has no attribute 'lower' .. versionadded:: 7.1 """ def __getitem__(self, key): return super(SopelIdentifierMemory, self).__getitem__(Identifier(key)) def __contains__(self, key): return super(SopelIdentifierMemory, self).__contains__(Identifier(key)) def __setitem__(self, key, value): super(SopelIdentifierMemory, self).__setitem__(Identifier(key), value) @deprecated(version='7.0', removed_in='8.0') def get_raising_file_and_line(tb=None): """Get the file and line number where an exception happened. :param tb: the traceback (uses the most recent exception if not given) :return: a tuple of the filename and line number :rtype: (str, int) .. deprecated:: 7.0 Use Python's built-in logging system, with the ``logger.exception`` method. This method makes sure to log the exception with the traceback and the relevant information (filename, line number, etc.). """ if not tb: tb = sys.exc_info()[2] filename, lineno, _context, _line = traceback.extract_tb(tb)[-1] return filename, lineno def chain_loaders(*lazy_loaders): """Chain lazy loaders into one. 
:param lazy_loaders: one or more lazy loader functions :type lazy_loaders: :term:`function` :return: a lazy loader that combines all of the given ones :rtype: :term:`function` This function takes any number of lazy loaders as arguments and merges them together into one. It's primarily a helper for lazy rule decorators such as :func:`sopel.plugin.url_lazy`. .. important:: This function doesn't check the uniqueness of regexes generated by all the loaders. """ def chained_loader(settings): return [ regex for lazy_loader in lazy_loaders for regex in lazy_loader(settings) ] return chained_loader
2,488
0
532
22713b1b406683d4ed6ad898aa63ba71a1a28ac4
3,520
py
Python
oracle-runs.py
fair-trec/trec2021-fair-public
d9f16bd7d70c350fa6e5906fac36c4f1af837573
[ "MIT" ]
3
2021-07-28T09:10:48.000Z
2022-03-23T15:08:48.000Z
oracle-runs.py
fair-trec/trec2021-fair-public
d9f16bd7d70c350fa6e5906fac36c4f1af837573
[ "MIT" ]
null
null
null
oracle-runs.py
fair-trec/trec2021-fair-public
d9f16bd7d70c350fa6e5906fac36c4f1af837573
[ "MIT" ]
null
null
null
""" Produce TREC fair ranking runs from an oracle. This script assumes the data lives in a directory 'data'. It loads the training topics and uses them as an oracle for producing rankings. Usage: oracle-runs.py --task1 [options] oracle-runs.py --task2 [options] Options: -v, --verbose Write verbose logging output. -o FILE Write output to FILE. -p PREC, --precision=PREC Produce results with the specified precision [default: 0.9]. """ import sys from pathlib import Path import logging from tqdm import tqdm from docopt import docopt import pandas as pd import numpy as np _log = logging.getLogger('oracle-runs') if __name__ == '__main__': opts = docopt(__doc__) main(opts)
27.716535
95
0.641477
""" Produce TREC fair ranking runs from an oracle. This script assumes the data lives in a directory 'data'. It loads the training topics and uses them as an oracle for producing rankings. Usage: oracle-runs.py --task1 [options] oracle-runs.py --task2 [options] Options: -v, --verbose Write verbose logging output. -o FILE Write output to FILE. -p PREC, --precision=PREC Produce results with the specified precision [default: 0.9]. """ import sys from pathlib import Path import logging from tqdm import tqdm from docopt import docopt import pandas as pd import numpy as np _log = logging.getLogger('oracle-runs') def load_metadata(): meta_f = Path('data/trec_metadata.json.gz') _log.info('reading %s', meta_f) meta = pd.read_json(meta_f, lines=True, compression='gzip') return meta.set_index('page_id') def load_topics(): topic_f = Path('data/trec_topics.json.gz') _log.info('reading %s', topic_f) topics = pd.read_json(topic_f, lines=True, compression='gzip') return topics def sample_docs(rng, meta, rel, n, prec): _log.debug('sampling %d rel items (n=%d, prec=%.2f)', len(rel), n, prec) n_rel = min(int(n * prec), len(rel)) n_unrel = n - n_rel rel = np.array(rel) all = pd.Series(meta.index) unrel = all[~all.isin(rel)].values samp_rel = rng.choice(rel, n_rel, replace=False) samp_unrel = rng.choice(unrel, n_unrel, replace=False) samp = np.concatenate([samp_rel, samp_unrel]) rng.shuffle(samp) return pd.Series(samp) def task1_run(opts, meta, topics): rng = np.random.default_rng() rank_len = 1000 prec = float(opts['--precision']) rels = topics[['id', 'rel_docs']].set_index('id').explode('rel_docs') def sample(df): return sample_docs(rng, meta, df['rel_docs'], rank_len, prec) runs = rels.groupby('id').apply(sample) runs.columns.name = 'rank' runs = runs.stack().reset_index(name='page_id') _log.info('sample runs:\n%s', runs) return runs[['id', 'page_id']] def task2_run(opts, meta, topics): rng = np.random.default_rng() rank_len = 50 run_count = 100 prec = float(opts['--precision']) 
rels = topics[['id', 'rel_docs']].set_index('id').explode('rel_docs') def one_sample(df): return sample_docs(rng, meta, df['rel_docs'], rank_len, prec) def multi_sample(df): runs = dict((i+1, one_sample(df)) for i in tqdm(range(run_count), 'reps', leave=False)) rdf = pd.DataFrame(runs) rdf.columns.name = 'seq_no' rdf.index.name = 'rank' return rdf.T runs = rels.groupby('id').progress_apply(multi_sample) runs = runs.stack().reset_index(name='page_id') _log.info('multi-sample runs:\n%s', runs) return runs[['id', 'seq_no', 'page_id']] def main(opts): level = logging.DEBUG if opts['--verbose'] else logging.INFO logging.basicConfig(stream=sys.stderr, level=level) tqdm.pandas(leave=False) meta = load_metadata() topics = load_topics() if opts['--task1']: runs = task1_run(opts, meta, topics) dft_out = 'task1.tsv' elif opts['--task2']: runs = task2_run(opts, meta, topics) dft_out = 'task2.tsv' else: raise ValueError('no task specified') out_file = opts.get('-o', dft_out) _log.info('writing to %s', out_file) runs.to_csv(out_file, index=False, sep='\t') if __name__ == '__main__': opts = docopt(__doc__) main(opts)
2,641
0
138
4b44ab6318cc359b020659d482e8f114bcd8d62d
6,394
py
Python
scan_list.py
jeremyng123/cobaltstrike
51a67ec918904d8a3b7ea07ab62a10b58183eb8f
[ "MIT" ]
193
2020-12-21T03:20:27.000Z
2022-02-28T11:11:08.000Z
scan_list.py
jeremyng123/cobaltstrike
51a67ec918904d8a3b7ea07ab62a10b58183eb8f
[ "MIT" ]
4
2021-01-09T11:27:57.000Z
2021-12-06T13:38:17.000Z
scan_list.py
jeremyng123/cobaltstrike
51a67ec918904d8a3b7ea07ab62a10b58183eb8f
[ "MIT" ]
54
2020-12-20T20:24:54.000Z
2022-02-28T11:11:09.000Z
import argparse import csv import requests import urllib3 from urllib.parse import urljoin from lib import decrypt_beacon, decode_config, JsonEncoder import multiprocessing from functools import partial import datetime import pandas as pd if __name__ == "__main__": parser = argparse.ArgumentParser(description='Extract Cobalt Strike beacon and configuration from a list of server') parser.add_argument('HOSTLIST', help='List of IP addresses or domains from a file') parser.add_argument('--PROCESS','-j', help='Number of process to be active simultaneously', default=10, type=int) parser.add_argument('--PORT','-p', help='Specify port on which scan will occur. Default: port 443', default=443, type=int) parser.add_argument('--BITS','-b', help='Specify which version of payload the script should request (32 or 64 bits). Default: 32', default=32, type=int, choices=[32, 64]) parser.add_argument('--HTTP', help='If specified, made request http and NOT https. Default : nothing', default=False, action='store_true') parser.add_argument('--format','-f', help='Specify format (csv or json). 
Default : csv', default="csv", choices=['csv', 'json']) args = parser.parse_args() ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36" urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) df = mp_handler(args.HOSTLIST, args.PROCESS, args.BITS, args.PORT, args.HTTP) header = ["host", "result", "ssl", "port", ".http-get.uri", ".http-post.uri", ".user-agent", ".watermark", "bits"] df["port"] = args.PORT df["ssl"] = not args.HTTP df["bits"] = args.BITS # Add columns if they are missing for h in header: if h not in df: df[h] = "" try: if args.format == "csv": df.to_csv(f'{datetime.date.today()}-{args.PORT}-test-output.csv', columns=header, index=False, doublequote=True, escapechar=",", quoting=csv.QUOTE_ALL) print(f'[+] Output success : {datetime.date.today()}-{args.PORT}-test-output.csv') else: df.to_json(f'{datetime.date.today()}-{args.PORT}-test-output.json', orient='records') print(f'[+] Output success : {datetime.date.today()}-{args.PORT}-test-output.json') except Exception as e: print("[-] Error during output : " + str(e))
43.202703
174
0.542853
import argparse import csv import requests import urllib3 from urllib.parse import urljoin from lib import decrypt_beacon, decode_config, JsonEncoder import multiprocessing from functools import partial import datetime import pandas as pd def mp_worker( BITS, PORT, HTTP,output_list, host ): print("Checking {}".format(host)) if not host.startswith("http"): if HTTP == True: host = "http://{0}:{1}".format(host, PORT) else : host = "https://{0}:{1}".format(host, PORT) if BITS == 64: uri = "/aad7" else : uri = "/aaa9" try: r = requests.get(urljoin(host, uri ), headers={'user-agent': ua}, verify=False, timeout=5) if r.status_code == 200: data = r.content if data.startswith(b"\xfc\xe8"): beacon = decrypt_beacon(data) if beacon: config = decode_config(beacon) if config: print(f"Payload {BITS} bits found") config["port"] = int(PORT) config["host"] = host config["result"] = "Found" output_list.append(config) else: config = dict() config["port"] = int(PORT) config["host"] = host config["result"] = "Config Extraction Failed" output_list.append(config) print("Config extraction failed") else: config = dict() print("Beacon extraction failed") config["port"] = int(PORT) config["host"] = host config["result"] = "Beacon Extraction Failed" output_list.append(config) elif data.startswith(b"MZ"): config = decode_config(data) if config: print(f"Payload {BITS} bits found") config["port"] = int(PORT) config["host"] = host config["result"] = "Found" output_list.append(config) else: config = dict() print("Config extraction failed") config["port"] = int(PORT) config["host"] = host config["result"] = "Config Extraction Failed" output_list.append(config) else: config = dict() print(f"No {BITS} bits payload") config["port"] = int(PORT) config["host"] = host config["result"] = "Not Found" output_list.append(config) else: config = dict() print(f"No {BITS} bits payload") config["port"] = int(PORT) config["host"] = host config["result"] = "Not Found" output_list.append(config) except Exception as e 
: print("Request failed : "+str(e)) config = dict() config["port"] = int(PORT) config["host"] = host config["result"] = "Request Failed" output_list.append(config) def mp_handler(HOSTLIST,PROCESS, BITS, PORT, HTTP): #Preparing multiprocessing with multiprocessing.Pool(PROCESS) as p: with multiprocessing.Manager() as manager: #Shared list between process output_list = manager.list() func = partial(mp_worker, BITS, PORT, HTTP, output_list) with open(HOSTLIST) as f: hosts = f.read().split('\n') hosts.remove('') #Multiprocessing p.imap(func, hosts) p.close() p.join() #Transform list into Pandas DF to facilitate output (json, csv, ....) real_list = list(output_list) df = pd.DataFrame(real_list) try: df[".watermark"] = df[".watermark"].astype('Int64', errors='ignore') except KeyError: # Not watermark identified at al df[".watermark"] = 0 return df if __name__ == "__main__": parser = argparse.ArgumentParser(description='Extract Cobalt Strike beacon and configuration from a list of server') parser.add_argument('HOSTLIST', help='List of IP addresses or domains from a file') parser.add_argument('--PROCESS','-j', help='Number of process to be active simultaneously', default=10, type=int) parser.add_argument('--PORT','-p', help='Specify port on which scan will occur. Default: port 443', default=443, type=int) parser.add_argument('--BITS','-b', help='Specify which version of payload the script should request (32 or 64 bits). Default: 32', default=32, type=int, choices=[32, 64]) parser.add_argument('--HTTP', help='If specified, made request http and NOT https. Default : nothing', default=False, action='store_true') parser.add_argument('--format','-f', help='Specify format (csv or json). 
Default : csv', default="csv", choices=['csv', 'json']) args = parser.parse_args() ua = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36" urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) df = mp_handler(args.HOSTLIST, args.PROCESS, args.BITS, args.PORT, args.HTTP) header = ["host", "result", "ssl", "port", ".http-get.uri", ".http-post.uri", ".user-agent", ".watermark", "bits"] df["port"] = args.PORT df["ssl"] = not args.HTTP df["bits"] = args.BITS # Add columns if they are missing for h in header: if h not in df: df[h] = "" try: if args.format == "csv": df.to_csv(f'{datetime.date.today()}-{args.PORT}-test-output.csv', columns=header, index=False, doublequote=True, escapechar=",", quoting=csv.QUOTE_ALL) print(f'[+] Output success : {datetime.date.today()}-{args.PORT}-test-output.csv') else: df.to_json(f'{datetime.date.today()}-{args.PORT}-test-output.json', orient='records') print(f'[+] Output success : {datetime.date.today()}-{args.PORT}-test-output.json') except Exception as e: print("[-] Error during output : " + str(e))
3,961
0
46
30ce9806dbf6f7dbafe0f568c2b2657bff6791b5
9,140
py
Python
ar-cnn/utils/midi_utils.py
carniblood/aws-deepcomposer-retrogame
6876eedd2a128362baff4daace73ee4f254676c3
[ "MIT-0" ]
null
null
null
ar-cnn/utils/midi_utils.py
carniblood/aws-deepcomposer-retrogame
6876eedd2a128362baff4daace73ee4f254676c3
[ "MIT-0" ]
null
null
null
ar-cnn/utils/midi_utils.py
carniblood/aws-deepcomposer-retrogame
6876eedd2a128362baff4daace73ee4f254676c3
[ "MIT-0" ]
null
null
null
# The MIT-Zero License # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import numpy as np from music21 import midi import pypianoroll from pypianoroll import Multitrack from texttable import Texttable from constants import Constants def process_midi(midi_file, beat_resolution, program=0, ignore_warnings=False): '''Takes path to an input midi file and parses it to pianoroll :param input_midi: Path to midi file :param beat_resolution :return: parsed painoroll ''' multi_track = pypianoroll.Multitrack(beat_resolution=beat_resolution) multi_track_onsets = multi_track.copy() try: multi_track.parse_midi(midi_file, skip_empty_tracks=False, collect_onsets_only=False, algorithm='custom', first_beat_time=0) multi_track_onsets.parse_midi(midi_file, skip_empty_tracks=False, collect_onsets_only=True, algorithm='custom', first_beat_time=0) except: print("ERROR - midi file: {} is invalid. 
Ignoring during preprocessing".format( midi_file)) pass # Merge regular pianoroll with onsets to make sure to not miss any notes for track_onset in multi_track_onsets.tracks: multi_track.append_track(track_onset) # Convert the PianoRoll to binary ignoring the values of velocities multi_track.pad_to_multiple(Constants.number_of_timesteps) multi_track.binarize() track_indices = [] # to merge all the voices track_indices_drum = [] # to merge drums for index,track in enumerate(multi_track.tracks): if track.is_drum: track_indices_drum.append(index) else: track_indices.append(index) if len(track_indices) == 0 and not ignore_warnings: print("WARNING - midi file: {} is empty. Ignoring during preprocessing".format(midi_file)) pass if len(track_indices_drum) == 0 and not ignore_warnings: print("WARNING - midi file: {} has no drum. Ignoring during preprocessing".format(midi_file)) pass multi_track.merge_tracks(track_indices=track_indices, mode='any', program=program, remove_merged=False) pianoroll = multi_track.tracks[-1].pianoroll drums = np.zeros((pianoroll.shape[0], 128), bool) if len(track_indices_drum) > 0: multi_track.merge_tracks(track_indices=track_indices_drum, mode='any', is_drum=True, remove_merged=False) drums = multi_track.tracks[-1].pianoroll return pianoroll,drums def process_pianoroll(pianoroll, drums, total_time_steps_shifted_per_sample, total_timesteps_per_nbars): '''Takes path to an input midi file and parses it to pianoroll :param pianoroll: pianoroll obtained after parsing midi file :param time_steps_shifted_per_sample: number of bars to be shifted in timesteps param timesteps_per_nbars: total number of timesteps to be included in processed pianoroll :return: parsed painoroll sections ''' if Constants.split_into_two_voices: time_steps_shifted_per_sample = total_time_steps_shifted_per_sample // 2 timesteps_per_nbars = total_timesteps_per_nbars // 2 else: time_steps_shifted_per_sample = total_time_steps_shifted_per_sample timesteps_per_nbars = 
total_timesteps_per_nbars # instead squeeze drums at the end of the pianoroll pianoroll_start = Constants.voices_maximum pianoroll_end = pianoroll_start + len(Constants.drums) pianoroll[:,pianoroll_start:pianoroll_end] = np.take(drums, axis=1, indices=Constants.drums) pianoroll[:,pianoroll_end:].fill(0) pianoroll_sections = [] truncated_pianoroll_length = pianoroll.shape[0]# - (pianoroll.shape[0] % timesteps_per_nbars) for i in range(0, truncated_pianoroll_length - timesteps_per_nbars + 1, time_steps_shifted_per_sample): if Constants.split_into_two_voices: pianoroll_section = pianoroll[i:i + timesteps_per_nbars, :] drum_section = drums[i:i + timesteps_per_nbars, :] section = np.concatenate((pianoroll_section, drum_section)) else: section = pianoroll[i:i + timesteps_per_nbars, :] if section.any(): pianoroll_sections.append(section) else: break return pianoroll_sections def play_midi(input_midi): '''Takes path to an input and plays the midi file in the notebook cell :param input_midi: Path to midi file :return: ''' midi_object = midi.MidiFile() midi_object.open(input_midi) midi_object.read() midi_object.close() show_midi = midi.translate.midiFileToStream(midi_object) show_midi.show('midi') def plot_pianoroll(pianoroll, beat_resolution, fig_name=None): '''Plot a Single Track Pianoroll image :param pianoroll: Pianoroll tensor of shape time_steps * pitches :return: ''' pypianoroll.plot_track(pypianoroll.Track(pianoroll=pianoroll), fig_name, beat_resolution) def get_music_metrics(input_midi, beat_resolution, track=0): """Takes a midifile as an input and Returns the following metrics :param input_midi: Path to midi file :param beat_resolution: :param trac: Instrument number in the multi track midi file :return: The following metrics are returned 1.) n_pitch_classes_used is the unique pitch classes used in a pianoroll. 2.) 
polyphonic_rate ie ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll 3.) in_scale_rate is the ratio of the number of nonzero entries that lie in a specific scale to the total number of nonzero entries in a pianoroll. 4.) n_pitches_used is the the number of unique pitches used in a pianoroll.""" midi_data = Multitrack(input_midi, beat_resolution) piano_roll = midi_data.tracks[track].pianoroll n_pitch_classes_used = pypianoroll.metrics.n_pitch_classes_used(piano_roll) polyphonic_rate = pypianoroll.metrics.polyphonic_rate(piano_roll) in_scale_rate = pypianoroll.metrics.in_scale_rate(piano_roll) n_pitches_used = pypianoroll.metrics.n_pitches_used(piano_roll) metrics = [ n_pitch_classes_used, polyphonic_rate, n_pitches_used, in_scale_rate ] metrics_table = [[ "n_pitch_classes_used", "in_scale_rate", "polyphonic_rate", "n_pitches_used" ], metrics] table = Texttable() table.add_rows(metrics_table) print(table.draw())
42.511628
101
0.659956
# The MIT-Zero License # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved. # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. import numpy as np from music21 import midi import pypianoroll from pypianoroll import Multitrack from texttable import Texttable from constants import Constants def print_midi_info(midi_file, all_programs_used=None, all_voices_used=None, all_drums_used=None): multi_track = pypianoroll.Multitrack() try: multi_track.parse_midi(midi_file, algorithm='custom', first_beat_time=0) except: print("ERROR - midi file: {} is invalid.".format( midi_file)) pass multi_track.binarize() for track in multi_track.tracks: is_drum = "" if track.is_drum: drums_used = np.nonzero(np.sum(track.pianoroll, axis=0))[0] is_drum = "[DRUM] " + str(drums_used) if all_drums_used is not None: all_drums_used.extend(drums_used) else: voices_used = np.nonzero(np.sum(track.pianoroll, axis=0))[0] if all_voices_used is not None: all_voices_used.extend(voices_used) if all_programs_used is not None: all_programs_used.append(track.program) print(" {}: {} {}".format(track.program, track.name, is_drum)) def process_midi(midi_file, beat_resolution, 
program=0, ignore_warnings=False): '''Takes path to an input midi file and parses it to pianoroll :param input_midi: Path to midi file :param beat_resolution :return: parsed painoroll ''' multi_track = pypianoroll.Multitrack(beat_resolution=beat_resolution) multi_track_onsets = multi_track.copy() try: multi_track.parse_midi(midi_file, skip_empty_tracks=False, collect_onsets_only=False, algorithm='custom', first_beat_time=0) multi_track_onsets.parse_midi(midi_file, skip_empty_tracks=False, collect_onsets_only=True, algorithm='custom', first_beat_time=0) except: print("ERROR - midi file: {} is invalid. Ignoring during preprocessing".format( midi_file)) pass # Merge regular pianoroll with onsets to make sure to not miss any notes for track_onset in multi_track_onsets.tracks: multi_track.append_track(track_onset) # Convert the PianoRoll to binary ignoring the values of velocities multi_track.pad_to_multiple(Constants.number_of_timesteps) multi_track.binarize() track_indices = [] # to merge all the voices track_indices_drum = [] # to merge drums for index,track in enumerate(multi_track.tracks): if track.is_drum: track_indices_drum.append(index) else: track_indices.append(index) if len(track_indices) == 0 and not ignore_warnings: print("WARNING - midi file: {} is empty. Ignoring during preprocessing".format(midi_file)) pass if len(track_indices_drum) == 0 and not ignore_warnings: print("WARNING - midi file: {} has no drum. 
Ignoring during preprocessing".format(midi_file)) pass multi_track.merge_tracks(track_indices=track_indices, mode='any', program=program, remove_merged=False) pianoroll = multi_track.tracks[-1].pianoroll drums = np.zeros((pianoroll.shape[0], 128), bool) if len(track_indices_drum) > 0: multi_track.merge_tracks(track_indices=track_indices_drum, mode='any', is_drum=True, remove_merged=False) drums = multi_track.tracks[-1].pianoroll return pianoroll,drums def process_pianoroll(pianoroll, drums, total_time_steps_shifted_per_sample, total_timesteps_per_nbars): '''Takes path to an input midi file and parses it to pianoroll :param pianoroll: pianoroll obtained after parsing midi file :param time_steps_shifted_per_sample: number of bars to be shifted in timesteps param timesteps_per_nbars: total number of timesteps to be included in processed pianoroll :return: parsed painoroll sections ''' if Constants.split_into_two_voices: time_steps_shifted_per_sample = total_time_steps_shifted_per_sample // 2 timesteps_per_nbars = total_timesteps_per_nbars // 2 else: time_steps_shifted_per_sample = total_time_steps_shifted_per_sample timesteps_per_nbars = total_timesteps_per_nbars # instead squeeze drums at the end of the pianoroll pianoroll_start = Constants.voices_maximum pianoroll_end = pianoroll_start + len(Constants.drums) pianoroll[:,pianoroll_start:pianoroll_end] = np.take(drums, axis=1, indices=Constants.drums) pianoroll[:,pianoroll_end:].fill(0) pianoroll_sections = [] truncated_pianoroll_length = pianoroll.shape[0]# - (pianoroll.shape[0] % timesteps_per_nbars) for i in range(0, truncated_pianoroll_length - timesteps_per_nbars + 1, time_steps_shifted_per_sample): if Constants.split_into_two_voices: pianoroll_section = pianoroll[i:i + timesteps_per_nbars, :] drum_section = drums[i:i + timesteps_per_nbars, :] section = np.concatenate((pianoroll_section, drum_section)) else: section = pianoroll[i:i + timesteps_per_nbars, :] if section.any(): pianoroll_sections.append(section) 
else: break return pianoroll_sections def play_midi(input_midi): '''Takes path to an input and plays the midi file in the notebook cell :param input_midi: Path to midi file :return: ''' midi_object = midi.MidiFile() midi_object.open(input_midi) midi_object.read() midi_object.close() show_midi = midi.translate.midiFileToStream(midi_object) show_midi.show('midi') def plot_pianoroll(pianoroll, beat_resolution, fig_name=None): '''Plot a Single Track Pianoroll image :param pianoroll: Pianoroll tensor of shape time_steps * pitches :return: ''' pypianoroll.plot_track(pypianoroll.Track(pianoroll=pianoroll), fig_name, beat_resolution) def get_music_metrics(input_midi, beat_resolution, track=0): """Takes a midifile as an input and Returns the following metrics :param input_midi: Path to midi file :param beat_resolution: :param trac: Instrument number in the multi track midi file :return: The following metrics are returned 1.) n_pitch_classes_used is the unique pitch classes used in a pianoroll. 2.) polyphonic_rate ie ratio of the number of time steps where the number of pitches being played is larger than `threshold` to the total number of time steps in a pianoroll 3.) in_scale_rate is the ratio of the number of nonzero entries that lie in a specific scale to the total number of nonzero entries in a pianoroll. 4.) 
n_pitches_used is the the number of unique pitches used in a pianoroll.""" midi_data = Multitrack(input_midi, beat_resolution) piano_roll = midi_data.tracks[track].pianoroll n_pitch_classes_used = pypianoroll.metrics.n_pitch_classes_used(piano_roll) polyphonic_rate = pypianoroll.metrics.polyphonic_rate(piano_roll) in_scale_rate = pypianoroll.metrics.in_scale_rate(piano_roll) n_pitches_used = pypianoroll.metrics.n_pitches_used(piano_roll) metrics = [ n_pitch_classes_used, polyphonic_rate, n_pitches_used, in_scale_rate ] metrics_table = [[ "n_pitch_classes_used", "in_scale_rate", "polyphonic_rate", "n_pitches_used" ], metrics] table = Texttable() table.add_rows(metrics_table) print(table.draw())
1,093
0
23
958a3b3c3c8bbbf71fdafc826be947de6c1e562c
2,740
py
Python
core/constraints/__init__.py
AluBhorta/UCSPy-Engine
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
[ "MIT" ]
6
2021-09-29T06:18:55.000Z
2021-09-29T18:56:13.000Z
core/constraints/__init__.py
AluBhorta/UCSPy-Engine
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
[ "MIT" ]
1
2021-10-01T00:00:15.000Z
2021-10-01T00:14:24.000Z
core/constraints/__init__.py
AluBhorta/UCSPy-Engine
917eea6ab98338c5763c9fd07e24da7fa2ee1cda
[ "MIT" ]
3
2020-06-26T05:59:56.000Z
2021-09-29T06:25:23.000Z
from core.constraints.hard.hard_constraint_1 import violates_hard_constraint_1 from core.constraints.hard.hard_constraint_2 import violates_hard_constraint_2 from core.constraints.soft.soft_constraint_1 import penalty_of_soft_constraint_1 from core.constraints.soft.soft_constraint_2 import penalty_of_soft_constraint_2 from core.constraints.soft.soft_constraint_3 import penalty_of_soft_constraint_3 from core.constraints.soft.soft_constraint_4 import penalty_of_soft_constraint_4 from core.constraints.soft.soft_constraint_5 import penalty_of_soft_constraint_5 from core.constraints.soft.soft_constraint_6 import penalty_of_soft_constraint_6 from core.constraints.soft.soft_constraint_7 import penalty_of_soft_constraint_7 from core.constraints.soft.soft_constraint_8 import penalty_of_soft_constraint_8 """ to learn how to add new constraints, read `core/constraints/modify_constraints.md` """ HARD_CONSTRAINT_FUNCS = [ { "id": 1, "func": violates_hard_constraint_1, "desc": "No two classes can take place in the same room at the same Timeslot." }, { "id": 2, "func": violates_hard_constraint_2, "desc": "No Instructor can take more than one class at a given Timeslot." }, ] SOFT_CONSTRAINT_FUNCS = [ { "id": 1, "unit_penalty": 0.9, "func": penalty_of_soft_constraint_1, "desc": "Instructors should only take certain courses they are are assigned to." }, { "id": 2, "unit_penalty": 0.85, "func": penalty_of_soft_constraint_2, "desc": "A particular Room should only allow Classes of certain Courses." }, { "id": 3, "unit_penalty": 0.6, "func": penalty_of_soft_constraint_3, "desc": "CourseGroups have Timeslot preferences." }, { "id": 4, "unit_penalty": 0.5, "func": penalty_of_soft_constraint_4, "desc": "Instructors have Timeslot preferences." }, { "id": 5, "unit_penalty": 1.0, "func": penalty_of_soft_constraint_5, "desc": "If a Course has 2 Lectures Per Week, it should take place in a composite Timeslot i.e. with Day code of 'ST' or 'MW'." 
}, { "id": 6, "unit_penalty": 0.8, "func": penalty_of_soft_constraint_6, "desc": "The Lab Section of a Course (if any) should be placed in a Timeslot that is before or after the corresponding Theory Section." }, { "id": 7, "unit_penalty": 0.6, "func": penalty_of_soft_constraint_7, "desc": "The Theory Section and the corresponding Lab Section of a Course (if any) should be taken by the same Instructor." }, { "id": 8, "unit_penalty": 0.9, "func": penalty_of_soft_constraint_8, "desc": "Instructors have minimum credit load requirements." }, ]
44.193548
143
0.712409
from core.constraints.hard.hard_constraint_1 import violates_hard_constraint_1 from core.constraints.hard.hard_constraint_2 import violates_hard_constraint_2 from core.constraints.soft.soft_constraint_1 import penalty_of_soft_constraint_1 from core.constraints.soft.soft_constraint_2 import penalty_of_soft_constraint_2 from core.constraints.soft.soft_constraint_3 import penalty_of_soft_constraint_3 from core.constraints.soft.soft_constraint_4 import penalty_of_soft_constraint_4 from core.constraints.soft.soft_constraint_5 import penalty_of_soft_constraint_5 from core.constraints.soft.soft_constraint_6 import penalty_of_soft_constraint_6 from core.constraints.soft.soft_constraint_7 import penalty_of_soft_constraint_7 from core.constraints.soft.soft_constraint_8 import penalty_of_soft_constraint_8 """ to learn how to add new constraints, read `core/constraints/modify_constraints.md` """ HARD_CONSTRAINT_FUNCS = [ { "id": 1, "func": violates_hard_constraint_1, "desc": "No two classes can take place in the same room at the same Timeslot." }, { "id": 2, "func": violates_hard_constraint_2, "desc": "No Instructor can take more than one class at a given Timeslot." }, ] SOFT_CONSTRAINT_FUNCS = [ { "id": 1, "unit_penalty": 0.9, "func": penalty_of_soft_constraint_1, "desc": "Instructors should only take certain courses they are are assigned to." }, { "id": 2, "unit_penalty": 0.85, "func": penalty_of_soft_constraint_2, "desc": "A particular Room should only allow Classes of certain Courses." }, { "id": 3, "unit_penalty": 0.6, "func": penalty_of_soft_constraint_3, "desc": "CourseGroups have Timeslot preferences." }, { "id": 4, "unit_penalty": 0.5, "func": penalty_of_soft_constraint_4, "desc": "Instructors have Timeslot preferences." }, { "id": 5, "unit_penalty": 1.0, "func": penalty_of_soft_constraint_5, "desc": "If a Course has 2 Lectures Per Week, it should take place in a composite Timeslot i.e. with Day code of 'ST' or 'MW'." 
}, { "id": 6, "unit_penalty": 0.8, "func": penalty_of_soft_constraint_6, "desc": "The Lab Section of a Course (if any) should be placed in a Timeslot that is before or after the corresponding Theory Section." }, { "id": 7, "unit_penalty": 0.6, "func": penalty_of_soft_constraint_7, "desc": "The Theory Section and the corresponding Lab Section of a Course (if any) should be taken by the same Instructor." }, { "id": 8, "unit_penalty": 0.9, "func": penalty_of_soft_constraint_8, "desc": "Instructors have minimum credit load requirements." }, ]
0
0
0
b38add5b43c8c288b94da71185957d3fc3fad700
963
py
Python
extras/scripts/convert_pronto_codes.py
aneisch/home-assistant
d8a866d75f512d23ffa7b9e5d82d7c575ece3c76
[ "Apache-2.0" ]
18
2016-08-10T01:02:27.000Z
2017-10-26T04:19:49.000Z
extras/scripts/convert_pronto_codes.py
aneisch/home-assistant
d8a866d75f512d23ffa7b9e5d82d7c575ece3c76
[ "Apache-2.0" ]
null
null
null
extras/scripts/convert_pronto_codes.py
aneisch/home-assistant
d8a866d75f512d23ffa7b9e5d82d7c575ece3c76
[ "Apache-2.0" ]
4
2017-04-20T19:41:21.000Z
2017-05-16T17:10:05.000Z
#!/usr/bin/env python # Provide code as arg: ./whatever.py "0000 ... ..." import binascii import struct if __name__ == '__main__': import sys for code in sys.argv[1:]: pronto = bytearray.fromhex(code) pulses = pronto2lirc(pronto) output = "" count = 0 for pulse in pulses: if count%2 != 0: output += "{},".format(pulse*-1) else: output += "{},".format(pulse) count += 1 print "["+output[:-1]+"]"
27.514286
90
0.554517
#!/usr/bin/env python # Provide code as arg: ./whatever.py "0000 ... ..." import binascii import struct def pronto2lirc(pronto): codes = [long(binascii.hexlify(pronto[i:i+2]), 16) for i in xrange(0, len(pronto), 2)] if codes[0]: raise ValueError('Pronto code should start with 0000') if len(codes) != 4 + 2 * (codes[2] + codes[3]): raise ValueError('Number of pulse widths does not match the preamble') frequency = 1 / (codes[1] * 0.241246) return [int(round(code / frequency)) for code in codes[4:]] if __name__ == '__main__': import sys for code in sys.argv[1:]: pronto = bytearray.fromhex(code) pulses = pronto2lirc(pronto) output = "" count = 0 for pulse in pulses: if count%2 != 0: output += "{},".format(pulse*-1) else: output += "{},".format(pulse) count += 1 print "["+output[:-1]+"]"
413
0
23
43dab3bbe7439e488e44d82a67a8a105d29e8e3d
7,328
py
Python
models/losses.py
yueyu-stu/EdgeAwareSpixel
f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
[ "MIT" ]
null
null
null
models/losses.py
yueyu-stu/EdgeAwareSpixel
f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
[ "MIT" ]
null
null
null
models/losses.py
yueyu-stu/EdgeAwareSpixel
f7f9fcb15bfa8e31bd4ad9473f9058c44a8391d7
[ "MIT" ]
null
null
null
import torch from torch import nn class MutualInfoLoss(nn.Module): """ Mutual Information Loss derived from ss-with-RIM that also applied in this work. First term enforces to generate a sparse nSpixel dimension vector for each pixel; Second term indicates the cardinality of each spixel. Args: logits: torch.tensor A trainable tensor of shape (b, nSpixel, h, w) that represents the probability of each pixel belonging to all spixels. It should be softmaxed before calling this loss funtion. coef: float A coefficient that controls the amplitude of second term. """ class SmoothContourLoss(nn.Module): """ Loss function that contains smoothness loss derived from ss-with-RIM and contour-aware loss. Smoothness loss concerns about smoothness of local patterns, while contour-aware loss is interested in whether two patches are divided. Cross entropy (or KL divergence) is applied to calculate Contour-aware loss. When calculating the gradients of probability, i.e. dp, and that of image, i.e. di, we desire that the distribution of dp should appoximate that of di. Args: logits: torch.tensor A trainable tensor of shape (b, nSpixel, h, w) It should be softmaxed before calling this loss function. 
image: torch.tensor A tensor derived from color channels of input with shape (b, c, h, w) sigma: float Parameter of transformed Gaussian kernel size weights: List[float] A List contains 2 coefficients that control the amplitudes of 2 losses thresh: float Parameter for controling the amplitude of edge margin: int Parameter for finding edge width """ class ReconstructionLoss(nn.Module): """ Reconstruction loss for validating whether the features extracted by CNN are effective Args: pred_img: torch.tensor A trainable tensor of shape (b, c, h, w), where c = 3 in default image: torch.tensor A tensor of shape (b, c, h, w) that is derived from color channels of input """ class Loss(nn.Module): """ Calculate total loss Args: pred_spixel: List[List[torch.tensor]] containing 2 Lists with spixel probabilities in x and y dir pred_img: tensor.tensor A trainable tensor of shape (b, c, h, w), where c is 3 in default img_in: tensor.tensor An input tensor of shape (b, c, h, w), where c is 20 in default These features consists of 15-channel features extracted by CNN, and 5-channel RGB () loss_weights: List[float] Weights for MutualInfoLoss, SmoothContourLoss, CompactnessLoss and Reconstruction in sequence sc_weights: List[float] Weights for SmoothContourLoss thresh: float A parameter for SmoothContourLoss coef_card: float Parameter for MutualInfoLoss sigma: float Parameter for SmoothContourLoss margin: int Parameter for SmoothContourLoss Returns: total_loss: torch.tensor A trainable tensor indicating final loss losses: List[torch.tensor] A List containing aforementioned losses in sequence """
37.387755
83
0.553357
import torch from torch import nn class MutualInfoLoss(nn.Module): """ Mutual Information Loss derived from ss-with-RIM that also applied in this work. First term enforces to generate a sparse nSpixel dimension vector for each pixel; Second term indicates the cardinality of each spixel. Args: logits: torch.tensor A trainable tensor of shape (b, nSpixel, h, w) that represents the probability of each pixel belonging to all spixels. It should be softmaxed before calling this loss funtion. coef: float A coefficient that controls the amplitude of second term. """ def __init__(self, coef=2): super().__init__() self.coef = coef def forward(self, logits): pixel_wise_ent = -(logits * torch.log(logits + 1e-16)).sum(1).mean() marginal_prob = logits.mean((2, 3)) marginal_ent = -(marginal_prob * torch.log(marginal_prob + 1e-16)).sum(1).mean() return pixel_wise_ent - self.coef * marginal_ent class SmoothContourLoss(nn.Module): """ Loss function that contains smoothness loss derived from ss-with-RIM and contour-aware loss. Smoothness loss concerns about smoothness of local patterns, while contour-aware loss is interested in whether two patches are divided. Cross entropy (or KL divergence) is applied to calculate Contour-aware loss. When calculating the gradients of probability, i.e. dp, and that of image, i.e. di, we desire that the distribution of dp should appoximate that of di. Args: logits: torch.tensor A trainable tensor of shape (b, nSpixel, h, w) It should be softmaxed before calling this loss function. 
image: torch.tensor A tensor derived from color channels of input with shape (b, c, h, w) sigma: float Parameter of transformed Gaussian kernel size weights: List[float] A List contains 2 coefficients that control the amplitudes of 2 losses thresh: float Parameter for controling the amplitude of edge margin: int Parameter for finding edge width """ def __init__(self, sigma=2, weights=[1, 1], thresh=1.0, margin=1): super().__init__() self.sigma = 2 * sigma**2 self.weights = weights self.thresh = thresh self.margin = margin def forward(self, logits, image): dp, di = self.get_gradients(logits, image) smooth = 0. contour = 0. for idx in range(len(dp)): smooth += self.smooth_loss(dp[idx], di[idx]) contour += self.contour_loss(dp[idx], di[idx]) return self.weights[0] * smooth + self.weights[1] * contour def get_gradients(self, logits, image): dp_dx = (logits[..., :-self.margin] - logits[..., self.margin:]) dp_dy = (logits[..., :-self.margin, :] - logits[..., self.margin:, :]) di_dx = (image[..., :-self.margin] - image[..., self.margin:]) di_dy = (image[..., :-self.margin, :] - image[..., self.margin:, :]) return [dp_dx, dp_dy], [di_dx, di_dy] def smooth_loss(self, dp, di): return (dp.abs().sum(1) * (-di.pow(2).sum(1) / self.sigma).exp()).mean() def contour_loss(self, dp, di): di_norm = di.pow(2) di_min = (di_norm.min(-1, keepdim=True).values).min(-2, keepdim=True).values di_max = (di_norm.max(-1, keepdim=True).values).max(-2, keepdim=True).values di_norm = ((di_norm - di_min) / (di_max - di_min + 1e-16)).sum(1) * 2 isValidEdges = di_norm > self.thresh dp_valid = dp.abs().sum(1) * isValidEdges di_valid = di_norm * isValidEdges return -(di_valid * (torch.log(dp_valid + 1e-16))).mean() class ReconstructionLoss(nn.Module): """ Reconstruction loss for validating whether the features extracted by CNN are effective Args: pred_img: torch.tensor A trainable tensor of shape (b, c, h, w), where c = 3 in default image: torch.tensor A tensor of shape (b, c, h, w) that is derived from 
color channels of input """ def __init__(self): super().__init__() self.mse_loss = nn.MSELoss() def forward(self, pred_img, image): return self.mse_loss(pred_img, image) class Loss(nn.Module): """ Calculate total loss Args: pred_spixel: List[List[torch.tensor]] containing 2 Lists with spixel probabilities in x and y dir pred_img: tensor.tensor A trainable tensor of shape (b, c, h, w), where c is 3 in default img_in: tensor.tensor An input tensor of shape (b, c, h, w), where c is 20 in default These features consists of 15-channel features extracted by CNN, and 5-channel RGB () loss_weights: List[float] Weights for MutualInfoLoss, SmoothContourLoss, CompactnessLoss and Reconstruction in sequence sc_weights: List[float] Weights for SmoothContourLoss thresh: float A parameter for SmoothContourLoss coef_card: float Parameter for MutualInfoLoss sigma: float Parameter for SmoothContourLoss margin: int Parameter for SmoothContourLoss Returns: total_loss: torch.tensor A trainable tensor indicating final loss losses: List[torch.tensor] A List containing aforementioned losses in sequence """ def __init__(self, loss_weights=[1, 2, 10], sc_weights=[1, .75], thresh=1.0, coef_card=2, sigma=2, margin=1): super().__init__() # parameters - weights self.weights = loss_weights # Loss classes self.mutual_info_loss = MutualInfoLoss(coef_card) self.sc_loss = SmoothContourLoss(sigma, sc_weights, thresh, margin) self.recon_loss = ReconstructionLoss() def forward(self, spixel_prob, recon_img, img_feat): loss_mutual = self.mutual_info_loss(spixel_prob) # use blurred & clahed image loss_sc = self.sc_loss(spixel_prob, img_feat[:, 3:-2, ...]) # use original image loss_recon = self.recon_loss(recon_img, img_feat[:, :3, ...]) loss_total = 0. losses = [loss_mutual, loss_sc, loss_recon] for idx in range(len(losses)): loss_total += self.weights[idx] * losses[idx] return loss_total, losses
3,197
0
319
2585b1efefd1186d26a369a8a60b9ee6612d3e1c
1,302
py
Python
cards/turret.py
MrCoft/EngiMod
65c90bd9231ac388d8af7849a1835914f1eefc78
[ "MIT" ]
null
null
null
cards/turret.py
MrCoft/EngiMod
65c90bd9231ac388d8af7849a1835914f1eefc78
[ "MIT" ]
null
null
null
cards/turret.py
MrCoft/EngiMod
65c90bd9231ac388d8af7849a1835914f1eefc78
[ "MIT" ]
null
null
null
from engi_mod import * Card( name = "Turret", type = "power", target = "self", rarity = "uncommon", cost = 1, const = dict( DAMAGE = 6, DAMAGE_UPGRADE = 3, MAGIC_NUMBER = "DAMAGE", ), desc = "At the end of your turn, deal !M! damage to a random enemy.", upgrade_code = """ upgradeName(); upgradeMagicNumber(DAMAGE_UPGRADE); """, ) Power( name = "Turret", desc = [ "At the end of your turn, deal #b", " damage to a random enemy." ], desc_expr = "DESCRIPTIONS[0] + amount + DESCRIPTIONS[1]", code = """ @Override public void atEndOfTurn(final boolean isPlayer) { if (AbstractDungeon.getCurrRoom().monsters.areMonstersBasicallyDead()) { AbstractDungeon.actionManager.clearPostCombatActions(); return; } flash(); AbstractMonster target = AbstractDungeon.getMonsters().getRandomMonster(true); if (target.currentHealth > 0) { AbstractDungeon.actionManager.addToBottom( new DamageAction((AbstractCreature)target, new DamageInfo(owner, amount, DamageInfo.DamageType.THORNS)) ); } } """, icon = "armor", )
28.933333
123
0.554531
from engi_mod import * Card( name = "Turret", type = "power", target = "self", rarity = "uncommon", cost = 1, const = dict( DAMAGE = 6, DAMAGE_UPGRADE = 3, MAGIC_NUMBER = "DAMAGE", ), desc = "At the end of your turn, deal !M! damage to a random enemy.", upgrade_code = """ upgradeName(); upgradeMagicNumber(DAMAGE_UPGRADE); """, ) Power( name = "Turret", desc = [ "At the end of your turn, deal #b", " damage to a random enemy." ], desc_expr = "DESCRIPTIONS[0] + amount + DESCRIPTIONS[1]", code = """ @Override public void atEndOfTurn(final boolean isPlayer) { if (AbstractDungeon.getCurrRoom().monsters.areMonstersBasicallyDead()) { AbstractDungeon.actionManager.clearPostCombatActions(); return; } flash(); AbstractMonster target = AbstractDungeon.getMonsters().getRandomMonster(true); if (target.currentHealth > 0) { AbstractDungeon.actionManager.addToBottom( new DamageAction((AbstractCreature)target, new DamageInfo(owner, amount, DamageInfo.DamageType.THORNS)) ); } } """, icon = "armor", )
0
0
0
d31cdcc12511cabf5ac61c6151db8886c0888504
494
py
Python
0x0F-python-object_relational_mapping/model_city.py
C-distin/alx-higher_level_programming
ee018135b24ac07d40f2309a4febf21b8a25aee4
[ "MIT" ]
null
null
null
0x0F-python-object_relational_mapping/model_city.py
C-distin/alx-higher_level_programming
ee018135b24ac07d40f2309a4febf21b8a25aee4
[ "MIT" ]
null
null
null
0x0F-python-object_relational_mapping/model_city.py
C-distin/alx-higher_level_programming
ee018135b24ac07d40f2309a4febf21b8a25aee4
[ "MIT" ]
null
null
null
#!/usr/bin/python3 """ Define a class City that inherits from Base """ from sqlalchemy import Column, Integer, String, ForeignKey from model_state import Base class City(Base): """ A class City that inherits from Base """ __tablename__ = "cities" id = Column(Integer, primary_key=True, nullable=False, unique=True, autoincrement=True) name = Column(String(128), nullable=False) state_id = Column(Integer, ForeignKey('states.id'), nullable=False)
24.7
71
0.686235
#!/usr/bin/python3 """ Define a class City that inherits from Base """ from sqlalchemy import Column, Integer, String, ForeignKey from model_state import Base class City(Base): """ A class City that inherits from Base """ __tablename__ = "cities" id = Column(Integer, primary_key=True, nullable=False, unique=True, autoincrement=True) name = Column(String(128), nullable=False) state_id = Column(Integer, ForeignKey('states.id'), nullable=False)
0
0
0
86d16852ee15057e22545a2d7257027440ff9f5d
2,857
py
Python
static_grouper/templatetags/static_grouper.py
adrienlachaize/dezede
584ec30cedab95152e2f95595b7691a04e6736e2
[ "BSD-3-Clause" ]
15
2015-02-10T21:16:31.000Z
2021-03-25T16:46:20.000Z
static_grouper/templatetags/static_grouper.py
adrienlachaize/dezede
584ec30cedab95152e2f95595b7691a04e6736e2
[ "BSD-3-Clause" ]
4
2021-02-10T15:42:08.000Z
2022-03-11T23:20:38.000Z
static_grouper/templatetags/static_grouper.py
adrienlachaize/dezede
584ec30cedab95152e2f95595b7691a04e6736e2
[ "BSD-3-Clause" ]
6
2016-07-10T14:20:48.000Z
2022-01-19T18:34:02.000Z
from collections import defaultdict from compressor.templatetags.compress import CompressorNode from django.template import ( Library, Node, Template, TemplateSyntaxError, Context) register = Library() CONTEXT_VARIABLE_NAME = 'static_grouper_dict' register.tag('addstatic', AddStaticNode) register.tag('static_list', StaticListNode)
32.465909
83
0.621281
from collections import defaultdict from compressor.templatetags.compress import CompressorNode from django.template import ( Library, Node, Template, TemplateSyntaxError, Context) register = Library() CONTEXT_VARIABLE_NAME = 'static_grouper_dict' class AddStaticNode(Node): def __init__(self, parser, token): contents = token.split_contents() if len(contents) not in (2, 3): raise TemplateSyntaxError if len(contents) == 3: assert contents[2] == 'nocompress' self.compress = False else: self.compress = True self.static_type = contents[1] self.nodelist = parser.parse(('endaddstatic',)) parser.delete_first_token() def render(self, context): output = self.nodelist.render(context).strip() static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME) if static_grouper_dict is None: root_context = context.dicts[0] root_context[CONTEXT_VARIABLE_NAME] = \ static_grouper_dict = defaultdict(list) item = (self.compress, output) if item not in static_grouper_dict[self.static_type]: static_grouper_dict[self.static_type].append(item) return '' register.tag('addstatic', AddStaticNode) class StaticListNode(Node): def __init__(self, parser, token): contents = token.split_contents() if len(contents) not in (2, 3): raise TemplateSyntaxError self.static_type = contents[1] if len(contents) == 3: assert contents[2] == 'compress' self.compress = True else: self.compress = False self.following_nodelist = parser.parse() def groups_iterator(self, static_grouper_dict): compressed_group = [] for compress, output in static_grouper_dict[self.static_type]: if compress: compressed_group.append(output) else: if compressed_group: yield True, ''.join(compressed_group) compressed_group = [] yield False, output if compressed_group: yield True, ''.join(compressed_group) def render(self, context): static_grouper_dict = context.get(CONTEXT_VARIABLE_NAME, defaultdict(list)) following = self.following_nodelist.render(context) inner = '' for compress, output in self.groups_iterator(static_grouper_dict): if compress 
and self.compress: inner += CompressorNode( nodelist=Template(output).nodelist, kind=self.static_type, mode='file').render(context=context) else: inner += output return inner + following register.tag('static_list', StaticListNode)
2,322
11
179
b9d69527abbd1d87104316935718861ead22d02c
549
py
Python
solfasol/issues/migrations/0009_auto_20200918_0020.py
rekognize/solfasol
c960c3364c753d75161242eccac4f085d800c843
[ "MIT" ]
null
null
null
solfasol/issues/migrations/0009_auto_20200918_0020.py
rekognize/solfasol
c960c3364c753d75161242eccac4f085d800c843
[ "MIT" ]
1
2020-06-18T13:08:47.000Z
2020-06-18T13:08:47.000Z
solfasol/issues/migrations/0009_auto_20200918_0020.py
Solfasol/solfasol
c960c3364c753d75161242eccac4f085d800c843
[ "MIT" ]
null
null
null
# Generated by Django 3.1.1 on 2020-09-17 21:20 from django.db import migrations, models
23.869565
109
0.591985
# Generated by Django 3.1.1 on 2020-09-17 21:20 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('issues', '0008_auto_20200918_0006'), ] operations = [ migrations.RemoveField( model_name='issue', name='file_data', ), migrations.AddField( model_name='issue', name='page_data', field=models.TextField(blank=True, help_text='page data for creating pages remotely', null=True), ), ]
0
435
23
c7e38cb251d253e29429b6f992fbff071d6adf2c
627
py
Python
osm_austria_building_coverage/urls.py
thomaskonrad/osm_austria_building_coverage
3f678837b6800adfdd165f9b8424d1a258ca63de
[ "MIT" ]
2
2015-06-21T19:39:05.000Z
2015-06-22T10:54:17.000Z
osm_austria_building_coverage/urls.py
thomaskonrad/osm-austria-building-coverage
3f678837b6800adfdd165f9b8424d1a258ca63de
[ "MIT" ]
2
2018-02-17T16:51:06.000Z
2018-02-23T07:11:13.000Z
osm_austria_building_coverage/urls.py
thomaskonrad/osm_austria_building_coverage
3f678837b6800adfdd165f9b8424d1a258ca63de
[ "MIT" ]
3
2019-10-16T08:46:13.000Z
2021-04-14T23:49:07.000Z
from django.conf.urls import url from coverage_score_viewer.views import index, states, districts, municipalities, details, search, coverage_chart from map.views import map urlpatterns = [ url(r'^$', index, name='index'), url(r'^states/$', states, name='states'), url(r'^districts/$', districts, name='districts'), url(r'^municipalities/$', municipalities, name='municipalities'), url(r'^details/(?P<boundary_id>\d+)$', details, name='details'), url(r'^search$', search, name='search'), url(r'^coverage-chart.svg', coverage_chart, name='coverage_chart'), url(r'^map/$', map, name='map'), ]
33
113
0.673046
from django.conf.urls import url from coverage_score_viewer.views import index, states, districts, municipalities, details, search, coverage_chart from map.views import map urlpatterns = [ url(r'^$', index, name='index'), url(r'^states/$', states, name='states'), url(r'^districts/$', districts, name='districts'), url(r'^municipalities/$', municipalities, name='municipalities'), url(r'^details/(?P<boundary_id>\d+)$', details, name='details'), url(r'^search$', search, name='search'), url(r'^coverage-chart.svg', coverage_chart, name='coverage_chart'), url(r'^map/$', map, name='map'), ]
0
0
0
3cc7f8fa749c9cf61233fe046871543a9e762b27
672
py
Python
PythonApps/hanoi_tower.py
zactsiap/SB4-sample
b9a70d267db3a330170a5cd6aa2c0bf37bfdd5d2
[ "MIT" ]
1
2018-03-28T08:55:10.000Z
2018-03-28T08:55:10.000Z
PythonApps/hanoi_tower.py
zactsiap/Python
b9a70d267db3a330170a5cd6aa2c0bf37bfdd5d2
[ "MIT" ]
null
null
null
PythonApps/hanoi_tower.py
zactsiap/Python
b9a70d267db3a330170a5cd6aa2c0bf37bfdd5d2
[ "MIT" ]
null
null
null
#Hanoi tower import time diskNumber = 1 while diskNumber != 0: print("============================") diskNumber = input("Give how many disk you have.\nGive 0 to end.\n============================\n") print("============================") diskNumber=int(diskNumber) moveTower(diskNumber,"A","C","B") print("Thanks for your time.") time.sleep(4)
28
103
0.552083
#Hanoi tower import time def moveTower(height,fromPole, toPole, withPole): if height >= 1: moveTower(height-1,fromPole,withPole,toPole) moveDisk(fromPole,toPole) moveTower(height-1,withPole,toPole,fromPole) def moveDisk(fp,tp): print("Moving disk from",fp,"to",tp) diskNumber = 1 while diskNumber != 0: print("============================") diskNumber = input("Give how many disk you have.\nGive 0 to end.\n============================\n") print("============================") diskNumber=int(diskNumber) moveTower(diskNumber,"A","C","B") print("Thanks for your time.") time.sleep(4)
233
0
58
0c47d0dab31b2f5af3dda0da867f4d530e0f3323
8,665
py
Python
build/lib/Pythactyl/client.py
IAmGadget/Pythactyl
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
[ "MIT" ]
2
2021-07-13T21:43:44.000Z
2021-09-06T08:05:35.000Z
build/lib/Pythactyl/client.py
IAmGadget/Pythactyl
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
[ "MIT" ]
1
2021-09-11T16:08:43.000Z
2021-10-30T16:10:56.000Z
build/lib/Pythactyl/client.py
IAmGadget/Pythactyl
b0cca838d434e8ceba1a90328708b9cde3d9fbcc
[ "MIT" ]
null
null
null
import requests from errors import PermissionsMissing # Servers # Subusers if __name__ == "__main__": print("I dont run. No use in trying to make me lol")
36.716102
130
0.552222
import requests from errors import PermissionsMissing class PterodactylClient(object): def __init__(self, url, api_key): self.url = url + "/api/client" self.api_key = api_key def account(self): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/account", headers=headers) return r.json() def check2fa(self): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/account/two-factor", headers=headers) return r.json() def updateEmail(self, email, password): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "email": email, "password": password } r = requests.put(self.url + "/account/email", headers=headers, json=data) return r.status_code def updatePassword(self, password, newpass, confpass): if newpass != confpass: return PasswordsDontMatch(f"{newpass} does not match {confpass}. 
Check spelling") headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "current_password": password, "password": newpass, "password_confirmation": confpass } r = requests.put(self.url + "/account/email", headers=headers, json=data) return r.json() def listApikeys(self): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/account/api-keys", headers=headers) return r.json() def createApiKey(self, description, allowed_ips=[]): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "description": description, "allowed_ips": allowed_ips } r = requests.post(self.url + "/account/api-keys", headers=headers, json=data) return r.json() def removeApiKey(self, code): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.delete(self.url + "/account/api-keys/" + str(code), headers=headers) if r.status_code == 404: return {'error': 'An error has occured','tips': 'Check the code is correct and/or existing'} else: return r.status_code # Servers def listServers(self): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url, headers=headers) return r.json() def getServer(self, identifier): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/servers/" + str(identifier), headers=headers) return r.json() def sendPowerAction(self, identifier, action): signals = ['start', 'stop','restart','kill'] if action.lower() not in signals: return {'error': 'Incorrect signal sent','available signals': signal} headers = { 
"Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "signal": action } r = requests.post(self.url + "/servers/" + identifier + "/power", headers=headers, json=data) return r def sendCommand(self, identifier, command): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "command": command } r = requests.post(self.url + "/servers/" + identifier + "/command", headers=headers, json=data) return r def listDatabases(self, identifier): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/servers/" + str(identifier) + "/databases", headers=headers) return r.json() def createDatabase(self, identifier, db_name, remote_addr="%"): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "database": db_name, "remote": remote_addr } r = requests.post(self.url + "/servers/" + str(identifier) + "/databases", headers=headers, json=data) return r.json() def resetDatabasePassword(self, identifier, db_id): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.post(self.url + "/servers/" + str(identifier) + "/databases/" + db_id + "/rotate-password", headers=headers) return r.json() def removeDatabase(self, identifier, db_id): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.delete(self.url + "/servers/" + str(identifier) + "/databases/" + db_id + "rotate-password", headers=headers) return r.json() # Subusers def listSubusers(self, identifier): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": 
"application/json" } r = requests.get(self.url + "/servers/" + str(identifier) + "/users", headers=headers) return r.json() def addSubuser(self, identifier, email, permissions=[]): if len(permissions) <= 0: raise PermissionsMissing("You must specify at least 1 permission node") headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "email": email, "permissions": permissions } r = requests.post(self.url + "/servers/" + str(identifier) + "/users", headers=headers, json=data) return r.json() def getSubuser(self, identifier, uuid): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.get(self.url + "/servers/" + str(identifier) + "/users/" + uuid, headers=headers) return r.json() def updateSubuser(self, identifier, uuid, permissions=[]): if len(permissions) <= 0: raise PermissionsMissing("You must specify at least 1 permission node") headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } data = { "permissions": permissions } r = requests.post(self.url + "/servers/" + str(identifier) + "/users/" + uuid, headers=headers, json=data) return r.json() def removeSubuser(self, identifier, uuid): headers = { "Authorization": "Bearer {}".format(self.api_key), "Accept": "application/json", "Content-type": "application/json" } r = requests.delete(self.url + "/servers/" + str(identifier) + "/users/" + uuid, headers=headers, json=data) return r.json() if __name__ == "__main__": print("I dont run. No use in trying to make me lol")
7,888
11
588
2958aa609c7322fec1b8dd23a67c95f349cb1219
1,110
py
Python
terse/Grammar/Command_Assign.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
2
2015-04-08T21:28:35.000Z
2015-04-08T23:25:04.000Z
terse/Grammar/Command_Assign.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
null
null
null
terse/Grammar/Command_Assign.py
talipovm/Terse
e6bfd3ac0d411b18d5167019623d5451695e787c
[ "MIT" ]
1
2015-04-08T21:29:22.000Z
2015-04-08T21:29:22.000Z
from Containers.ParsedStructure import ParsedElement from Tools.misc import strip_all from Grammar.Top_Grammar import Top_Grammar from Grammar.Functions import ExpressionFactory import logging log = logging.getLogger(__name__)
32.647059
101
0.667568
from Containers.ParsedStructure import ParsedElement from Tools.misc import strip_all from Grammar.Top_Grammar import Top_Grammar from Grammar.Functions import ExpressionFactory import logging log = logging.getLogger(__name__) class Command_Assign(Top_Grammar): def __init__(self, GI, FI, parsed_container, troublemakers): super().__init__(GI, FI, parsed_container, troublemakers) s = self.GI.s v = s.split('=', maxsplit=1) s_keys, s_function = strip_all(v) self.keys = strip_all(s_keys.split(',')) self.f = ExpressionFactory(s_function,f_get_params=self.parsed_container.last_value).assign() def execute(self): vals = self.f.get_value(self.FI.s) if vals is None: # expression was not found return if self.keys == ['row']: # are we parsing a table row? return vals if len(self.keys) != len(vals): raise SyntaxError for key, val in zip(self.keys,vals): assigned_element = ParsedElement(key, val) self.parsed_container.append(assigned_element)
794
13
76
2543280c31c85783d24075e8fcbb3059c3236aa9
136
py
Python
minos/aggregate/models/diffs/__init__.py
Clariteia/minos_microservice_aggregate
517db2f1abb11f5a8f2d77da76613061590ee5a7
[ "MIT" ]
3
2021-11-05T08:47:45.000Z
2021-11-17T09:37:26.000Z
minos/aggregate/models/diffs/__init__.py
Clariteia/minos_microservice_aggregate
517db2f1abb11f5a8f2d77da76613061590ee5a7
[ "MIT" ]
30
2021-11-05T08:49:28.000Z
2022-01-28T12:00:56.000Z
minos/aggregate/models/diffs/__init__.py
Clariteia/minos_microservice_aggregate
517db2f1abb11f5a8f2d77da76613061590ee5a7
[ "MIT" ]
null
null
null
from .aggregates import ( AggregateDiff, ) from .fields import ( FieldDiff, FieldDiffContainer, IncrementalFieldDiff, )
15.111111
25
0.705882
from .aggregates import ( AggregateDiff, ) from .fields import ( FieldDiff, FieldDiffContainer, IncrementalFieldDiff, )
0
0
0
54e7f05eb53e54dca85ec1cb19c90651ae041cb3
2,596
py
Python
app/api/schema/image_sizes.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
3
2019-09-05T05:28:49.000Z
2020-06-10T09:03:37.000Z
app/api/schema/image_sizes.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
null
null
null
app/api/schema/image_sizes.py
akashtalole/python-flask-restful-api
475d8fd7be1724183716a197aac4257f8fbbeac4
[ "MIT" ]
null
null
null
from marshmallow_jsonapi import fields from marshmallow_jsonapi.flask import Schema from app.api.helpers.utilities import dasherize from utils.common import use_defaults @use_defaults() class EventImageSizeSchema(Schema): """ Api schema for image_size Model """ class Meta: """ Meta class for image_size Api Schema """ type_ = 'event-image-size' self_view = 'v1.event_image_size_detail' self_view_kwargs = {'id': '<id>'} inflect = dasherize id = fields.Str(dump_only=True) type = fields.Str(allow_none=True) full_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) full_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) full_aspect = fields.Boolean(default=False) full_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) icon_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_aspect = fields.Boolean(default=False) icon_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) thumbnail_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_aspect = fields.Boolean(default=False) thumbnail_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) logo_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) logo_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) @use_defaults() class SpeakerImageSizeSchema(Schema): """ Api schema for image_size Model """ class Meta: """ Meta class for image_size Api Schema """ type_ = 'speaker-image-size' self_view = 'v1.speaker_image_size_detail' self_view_kwargs = {'id': '<id>'} inflect = dasherize id = fields.Str(dump_only=True) type = fields.Str(allow_none=True) small_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) small_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) 
thumbnail_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) icon_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True)
41.870968
94
0.692604
from marshmallow_jsonapi import fields from marshmallow_jsonapi.flask import Schema from app.api.helpers.utilities import dasherize from utils.common import use_defaults @use_defaults() class EventImageSizeSchema(Schema): """ Api schema for image_size Model """ class Meta: """ Meta class for image_size Api Schema """ type_ = 'event-image-size' self_view = 'v1.event_image_size_detail' self_view_kwargs = {'id': '<id>'} inflect = dasherize id = fields.Str(dump_only=True) type = fields.Str(allow_none=True) full_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) full_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) full_aspect = fields.Boolean(default=False) full_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) icon_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_aspect = fields.Boolean(default=False) icon_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) thumbnail_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_aspect = fields.Boolean(default=False) thumbnail_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) logo_width = fields.Integer(validate=lambda n: n >= 0, allow_none=True) logo_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) @use_defaults() class SpeakerImageSizeSchema(Schema): """ Api schema for image_size Model """ class Meta: """ Meta class for image_size Api Schema """ type_ = 'speaker-image-size' self_view = 'v1.speaker_image_size_detail' self_view_kwargs = {'id': '<id>'} inflect = dasherize id = fields.Str(dump_only=True) type = fields.Str(allow_none=True) small_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) small_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) 
thumbnail_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) thumbnail_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True) icon_size_width_height = fields.Integer(validate=lambda n: n >= 0, allow_none=True) icon_size_quality = fields.Integer(validate=lambda n: 0 <= n <= 100, allow_none=True)
0
0
0
80ccd07a50d9e26f0f3c296670920b067e2d2e5e
1,043
py
Python
Algorithms/0097_Interleaving_String/Python/Interleaving_String_Solution_1.py
lht19900714/Leetcode_Solutions
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
[ "MIT" ]
null
null
null
Algorithms/0097_Interleaving_String/Python/Interleaving_String_Solution_1.py
lht19900714/Leetcode_Solutions
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
[ "MIT" ]
null
null
null
Algorithms/0097_Interleaving_String/Python/Interleaving_String_Solution_1.py
lht19900714/Leetcode_Solutions
dac7a038329a5c1f8a78e86cc6f49116b963f1fb
[ "MIT" ]
null
null
null
# Space: O(n) # Time: O(n)
28.972222
99
0.540748
# Space: O(n) # Time: O(n) class Solution: def isInterleave(self, s1: str, s2: str, s3: str) -> bool: length_1, length_2, length_3 = len(s1), len(s2), len(s3) if length_1 == length_2 == length_3 == 0: return True if length_1 + length_2 != length_3: return False cache = [[None for _ in range(length_1)] for _ in range(length_2)] def dfs(board, s1, s2, s3, index1, index2, index3): if index1 == len(s1): return s2[index2:] == s3[index3:] if index2 == len(s2): return s1[index1:] == s3[index3:] if board[index2][index1] is not None: return board[index2][index1] ans = False if s1[index1] == s3[index3] and dfs(board, s1, s2, s3, index1 + 1, index2, index3 + 1): ans = True if s2[index2] == s3[index3] and dfs(board, s1, s2, s3, index1, index2 + 1, index3 + 1): ans = True board[index2][index1] = ans return ans return dfs(cache, s1, s2, s3, 0, 0, 0)
964
-6
49
a2cfb3dd847c2c2f892bf3b4da52f1a13e5ade61
338
py
Python
carProject/carApp/models.py
cs-fullstack-2019-spring/django-validation-cw-cgarciapieto
51df6fbd8d4e5ad878c4a620ca822ecd9df55711
[ "Apache-2.0" ]
null
null
null
carProject/carApp/models.py
cs-fullstack-2019-spring/django-validation-cw-cgarciapieto
51df6fbd8d4e5ad878c4a620ca822ecd9df55711
[ "Apache-2.0" ]
null
null
null
carProject/carApp/models.py
cs-fullstack-2019-spring/django-validation-cw-cgarciapieto
51df6fbd8d4e5ad878c4a620ca822ecd9df55711
[ "Apache-2.0" ]
null
null
null
from django.db import models # car Model with input/attributes make, model, year, mpg
28.166667
56
0.718935
from django.db import models # car Model with input/attributes make, model, year, mpg class CarModel(models.Model): make = models.CharField(max_length=200) model = models.CharField(max_length=200) year = models.IntegerField(default=0) mpg = models.IntegerField(default=0) def __str__(self): return self.make
22
207
23
e8525b400d730f96fedff9ad040d2992ff14e0fd
193
py
Python
ecfactory/mnt_cycles/mnt_cycles_examples.py
weikengchen/ecfactory
f509c00b7cf66f4b8dbe9540599a4c95b9742bfd
[ "MIT" ]
39
2016-06-09T13:47:57.000Z
2022-02-10T14:06:20.000Z
ecfactory/mnt_cycles/mnt_cycles_examples.py
frevson/ecfactory-A-SageMath-Library-for-Constructing-Elliptic-Curves
f509c00b7cf66f4b8dbe9540599a4c95b9742bfd
[ "MIT" ]
3
2019-04-26T14:15:34.000Z
2021-02-03T09:21:37.000Z
ecfactory/mnt_cycles/mnt_cycles_examples.py
frevson/ecfactory-A-SageMath-Library-for-Constructing-Elliptic-Curves
f509c00b7cf66f4b8dbe9540599a4c95b9742bfd
[ "MIT" ]
13
2017-09-27T08:08:49.000Z
2022-03-28T12:11:20.000Z
import ecfactory.mnt_cycles as mnt_cycles # Example (find an MNT cycle with D = -19) cycles = mnt_cycles.make_cycle(-19) print('Found a cycle: ' + str(cycles[0][0]) + ', ' + str(cycles[0][1]))
38.6
71
0.678756
import ecfactory.mnt_cycles as mnt_cycles # Example (find an MNT cycle with D = -19) cycles = mnt_cycles.make_cycle(-19) print('Found a cycle: ' + str(cycles[0][0]) + ', ' + str(cycles[0][1]))
0
0
0
394e112047884d425fd5bac24f0c8c865dbcab28
2,159
py
Python
tests/unit/test_core.py
ehebert/pipenv
b771621274fcdb6980b4c9682bd2b2879e3354d1
[ "MIT" ]
2
2018-11-06T04:53:13.000Z
2018-11-08T22:10:20.000Z
tests/unit/test_core.py
ehebert/pipenv
b771621274fcdb6980b4c9682bd2b2879e3354d1
[ "MIT" ]
1
2018-11-25T10:50:57.000Z
2018-11-25T10:50:57.000Z
tests/unit/test_core.py
ehebert/pipenv
b771621274fcdb6980b4c9682bd2b2879e3354d1
[ "MIT" ]
1
2021-07-03T03:30:45.000Z
2021-07-03T03:30:45.000Z
import os import pytest import mock from pipenv._compat import TemporaryDirectory from pipenv.core import warn_in_virtualenv, load_dot_env from pipenv.utils import temp_environ @mock.patch('pipenv.environments.PIPENV_VIRTUALENV', 'totallyrealenv') @mock.patch('pipenv.environments.PIPENV_VERBOSITY', -1) @pytest.mark.core @pytest.mark.core @pytest.mark.core @pytest.mark.core
35.983333
84
0.692913
import os import pytest import mock from pipenv._compat import TemporaryDirectory from pipenv.core import warn_in_virtualenv, load_dot_env from pipenv.utils import temp_environ @mock.patch('pipenv.environments.PIPENV_VIRTUALENV', 'totallyrealenv') @mock.patch('pipenv.environments.PIPENV_VERBOSITY', -1) @pytest.mark.core def test_suppress_nested_venv_warning(capsys): # Capture the stderr of warn_in_virtualenv to test for the presence of the # courtesy notice. warn_in_virtualenv() output, err = capsys.readouterr() assert 'Courtesy Notice' not in err @pytest.mark.core def test_load_dot_env_from_environment_variable_location(capsys): with temp_environ(), TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: dotenv_path = os.path.join(tempdir.name, 'test.env') key, val = 'SOME_KEY', 'some_value' with open(dotenv_path, 'w') as f: f.write('{}={}'.format(key, val)) with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): load_dot_env() assert os.environ[key] == val @pytest.mark.core def test_doesnt_load_dot_env_if_disabled(capsys): with temp_environ(), TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: dotenv_path = os.path.join(tempdir.name, 'test.env') key, val = 'SOME_KEY', 'some_value' with open(dotenv_path, 'w') as f: f.write('{}={}'.format(key, val)) with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): with mock.patch('pipenv.environments.PIPENV_DONT_LOAD_ENV', '1'): load_dot_env() assert key not in os.environ load_dot_env() assert key in os.environ @pytest.mark.core def test_load_dot_env_warns_if_file_doesnt_exist(capsys): with temp_environ(), TemporaryDirectory(prefix='pipenv-', suffix='') as tempdir: dotenv_path = os.path.join(tempdir.name, 'does-not-exist.env') with mock.patch('pipenv.environments.PIPENV_DOTENV_LOCATION', dotenv_path): load_dot_env() output, err = capsys.readouterr() assert 'Warning' in err
1,685
0
88
19600211c81f047065f97d9d51dc1a3189acd673
4,107
py
Python
data_collector_app.py
durgeshsamariya/foodify.ai
c058996fb095103f3b28e1d439073cf4ddbd26b0
[ "MIT" ]
2
2022-02-17T06:25:10.000Z
2022-02-20T03:57:37.000Z
data_collector_app.py
durgeshsamariya/foodify.ai
c058996fb095103f3b28e1d439073cf4ddbd26b0
[ "MIT" ]
null
null
null
data_collector_app.py
durgeshsamariya/foodify.ai
c058996fb095103f3b28e1d439073cf4ddbd26b0
[ "MIT" ]
null
null
null
import PIL import streamlit as st import datetime import uuid from streamlit.uploaded_file_manager import UploadedFile from utils.utils import upload_image, create_unique_filename, save_to_google_sheet st.title("Foodify.ai Data Collection 🍔🌯🍫") st.write( "Upload or take a photo of your food and help us to build the world's biggest \ indian food image database!" ) # Store image upload ID as key, this will be changed once image is uploaded if "upload_key" not in st.session_state: st.session_state["upload_key"] = str(uuid.uuid4()) uploaded_image = st.file_uploader( label="Upload an image of any indian food", type=["png", "jpeg", "jpg"], help="Tip: if you're on a mobile device you can also take a photo", # set the key for the uploaded file key=st.session_state["upload_key"], ) def display_image(img: UploadedFile) -> PIL.Image: """ Displays an image if the image exists. """ displayed_image = None if img is not None: # Show the image img = PIL.Image.open(img) print("Displaying image...") print(img.height, img.width) displayed_image = st.image(img, use_column_width="auto") return img, displayed_image image, displayed_image = display_image(uploaded_image) # Create image label form to submit st.write("## Image details") with st.form(key="image_metadata_submit_form", clear_on_submit=True): # Image label label = st.text_input( label="What food(s) it is in the image that you have uploaded? \ You can enter text like: '*biryani*' or '*parotha, aloo parotha*' or '*dosa, masala dosa*' ", max_chars=100, ) # Image upload location place = st.text_input( label="Where are you uploading this images? \ You can enter state name if you are uploading image from India else you can enter country name. ", autocomplete="place", max_chars=100, ) # Person email email = st.text_input( label="What's your email? 
(optional, we'll use this to contact you \ about the app/say thank you for your image(s))", autocomplete="email", ) # Disclaimer st.info( '**Note:** If you click "upload image", your image will be stored on \ our servers and we will use this image to the largest indian food image database\ in the world! *(Do not upload anything sensitive, \ as we will make it publically available soon)*' ) # Submit button + logic submit_button = st.form_submit_button( label="Upload image", help="Click to upload your image and label to Foodify.ai servers.", ) if submit_button: if uploaded_image is None: st.error("Please upload an image.") else: # Generate unique filename for the image unique_image_id = create_unique_filename() # Make timestamp current_time = datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S" ) #Upload image object to AWS S3 Bucket with st.spinner("Uploading Image to our server..."): upload_image( source_file=uploaded_image, destination_file_name=unique_image_id + ".jpg", ) st.success( f"Your image of {label} has been uploaded sucessfully! Thank you for your contribution :)" ) info = [ [ unique_image_id, current_time, image.height, image.width, label, place, email ] ] save_to_google_sheet(info) # Remove (displayed) image after upload successful displayed_image.empty() # To do this, the key it's stored under Streamlit's # UploadedFile gets changed to something random st.session_state["upload_key"] = str(uuid.uuid4())
33.390244
110
0.600195
import PIL import streamlit as st import datetime import uuid from streamlit.uploaded_file_manager import UploadedFile from utils.utils import upload_image, create_unique_filename, save_to_google_sheet st.title("Foodify.ai Data Collection 🍔🌯🍫") st.write( "Upload or take a photo of your food and help us to build the world's biggest \ indian food image database!" ) # Store image upload ID as key, this will be changed once image is uploaded if "upload_key" not in st.session_state: st.session_state["upload_key"] = str(uuid.uuid4()) uploaded_image = st.file_uploader( label="Upload an image of any indian food", type=["png", "jpeg", "jpg"], help="Tip: if you're on a mobile device you can also take a photo", # set the key for the uploaded file key=st.session_state["upload_key"], ) def display_image(img: UploadedFile) -> PIL.Image: """ Displays an image if the image exists. """ displayed_image = None if img is not None: # Show the image img = PIL.Image.open(img) print("Displaying image...") print(img.height, img.width) displayed_image = st.image(img, use_column_width="auto") return img, displayed_image image, displayed_image = display_image(uploaded_image) # Create image label form to submit st.write("## Image details") with st.form(key="image_metadata_submit_form", clear_on_submit=True): # Image label label = st.text_input( label="What food(s) it is in the image that you have uploaded? \ You can enter text like: '*biryani*' or '*parotha, aloo parotha*' or '*dosa, masala dosa*' ", max_chars=100, ) # Image upload location place = st.text_input( label="Where are you uploading this images? \ You can enter state name if you are uploading image from India else you can enter country name. ", autocomplete="place", max_chars=100, ) # Person email email = st.text_input( label="What's your email? 
(optional, we'll use this to contact you \ about the app/say thank you for your image(s))", autocomplete="email", ) # Disclaimer st.info( '**Note:** If you click "upload image", your image will be stored on \ our servers and we will use this image to the largest indian food image database\ in the world! *(Do not upload anything sensitive, \ as we will make it publically available soon)*' ) # Submit button + logic submit_button = st.form_submit_button( label="Upload image", help="Click to upload your image and label to Foodify.ai servers.", ) if submit_button: if uploaded_image is None: st.error("Please upload an image.") else: # Generate unique filename for the image unique_image_id = create_unique_filename() # Make timestamp current_time = datetime.datetime.now().strftime( "%Y-%m-%d %H:%M:%S" ) #Upload image object to AWS S3 Bucket with st.spinner("Uploading Image to our server..."): upload_image( source_file=uploaded_image, destination_file_name=unique_image_id + ".jpg", ) st.success( f"Your image of {label} has been uploaded sucessfully! Thank you for your contribution :)" ) info = [ [ unique_image_id, current_time, image.height, image.width, label, place, email ] ] save_to_google_sheet(info) # Remove (displayed) image after upload successful displayed_image.empty() # To do this, the key it's stored under Streamlit's # UploadedFile gets changed to something random st.session_state["upload_key"] = str(uuid.uuid4())
0
0
0
62c7d6156a97c6551bc35203a263f248aed6d089
2,525
py
Python
libs/orm.py
BreakUnrealGod/TanTan
043454a76ee27d61e7d9aede7818f9127e34aaf2
[ "MIT" ]
null
null
null
libs/orm.py
BreakUnrealGod/TanTan
043454a76ee27d61e7d9aede7818f9127e34aaf2
[ "MIT" ]
10
2019-12-04T23:38:04.000Z
2022-02-10T09:53:59.000Z
swiper/libs/orm.py
lijiaqipy/test1
ab628a794ab67e153b929c819c876c5a676ab068
[ "MIT" ]
null
null
null
from django.db import models from django.core.cache import cache from common import config # class ModelToDictMixin(object): def get(cls, *args, **kwargs): """ 为 objects 管理类的 get 方法增加缓存功能 Model.get(pk=123) :param self: :param args: :param kwargs: :return: """ # 1、从缓存中获取数据 # 根据 pk 或 id 字段获得模型主键 if 'pk' in kwargs: pk = kwargs.get('pk') else: pk = kwargs.get('id') if pk is not None: # 根据主键生成的 key,从缓存中获得数据 key = config.MODEL_CACHE_PREFIX % (cls.__name__, pk) model_obj = cache.get(key) # 如果缓存中不为空,则返回,否则执行原有数据库操作 if isinstance(model_obj, cls): return model_obj # 2、如果缓存中不存在,则从数据库中获得数据 model_obj = cls.objects.get(*args, **kwargs) # 3、将数据库中返回的数据保存至缓存 key = config.MODEL_CACHE_PREFIX % (cls.__name__, model_obj.pk) cache.set(key, model_obj) return model_obj def get_or_create(cls, defaults=None, **kwargs): """ 为 objects 管理类的 get_or_create 增加缓存操作 User.get_or_create() :param cls: :param defaults: :param kwargs: :return: """ # 根据 pk 或 id 字段获得模型主键 if 'pk' in kwargs: pk = kwargs.get('pk') else: pk = kwargs.get('id') if pk is not None: # 根据主键生成的 key,从缓存中获得数据 key = config.MODEL_CACHE_PREFIX % (cls.__name__, pk) model_obj = cache.get(key) # 如果缓存中不为空,则返回,否则执行原有数据库操作 if isinstance(model_obj, cls): return model_obj, False model_obj, created = cls.objects.get_or_create(defaults=None, **kwargs) key = config.MODEL_CACHE_PREFIX % (cls.__name__, model_obj.pk) cache.set(key, model_obj) return model_obj, created
22.954545
75
0.636832
from django.db import models from django.core.cache import cache from common import config # class ModelToDictMixin(object): def to_dict(self, exclude=[]): attr_dict = {} for field in self._meta.fields: field_name = field.attname if field_name not in exclude: attr_dict[field_name] = getattr(self, field_name) return attr_dict def get(cls, *args, **kwargs): """ 为 objects 管理类的 get 方法增加缓存功能 Model.get(pk=123) :param self: :param args: :param kwargs: :return: """ # 1、从缓存中获取数据 # 根据 pk 或 id 字段获得模型主键 if 'pk' in kwargs: pk = kwargs.get('pk') else: pk = kwargs.get('id') if pk is not None: # 根据主键生成的 key,从缓存中获得数据 key = config.MODEL_CACHE_PREFIX % (cls.__name__, pk) model_obj = cache.get(key) # 如果缓存中不为空,则返回,否则执行原有数据库操作 if isinstance(model_obj, cls): return model_obj # 2、如果缓存中不存在,则从数据库中获得数据 model_obj = cls.objects.get(*args, **kwargs) # 3、将数据库中返回的数据保存至缓存 key = config.MODEL_CACHE_PREFIX % (cls.__name__, model_obj.pk) cache.set(key, model_obj) return model_obj def get_or_create(cls, defaults=None, **kwargs): """ 为 objects 管理类的 get_or_create 增加缓存操作 User.get_or_create() :param cls: :param defaults: :param kwargs: :return: """ # 根据 pk 或 id 字段获得模型主键 if 'pk' in kwargs: pk = kwargs.get('pk') else: pk = kwargs.get('id') if pk is not None: # 根据主键生成的 key,从缓存中获得数据 key = config.MODEL_CACHE_PREFIX % (cls.__name__, pk) model_obj = cache.get(key) # 如果缓存中不为空,则返回,否则执行原有数据库操作 if isinstance(model_obj, cls): return model_obj, False model_obj, created = cls.objects.get_or_create(defaults=None, **kwargs) key = config.MODEL_CACHE_PREFIX % (cls.__name__, model_obj.pk) cache.set(key, model_obj) return model_obj, created def save(self, force_insert=False, force_update=False, using=None, update_fields=None): # 使用 models.Model 原有的实例方法保存数据 self._save(force_insert=False, force_update=False, using=None, update_fields=None) # 将实例保存到缓存 key = config.MODEL_CACHE_PREFIX % (self.__class__.__name__, self.pk) cache.set(key, self) def path_model(): models.Model.get = classmethod(get) models.Model.get_or_create = 
classmethod(get_or_create) models.Model._save = models.Model.save models.Model.save = save models.Model.to_dict = to_dict
794
0
68
a3a6bcc50693cebd7f3c061c9e8471bb491ad207
3,142
py
Python
airbyte-integrations/connectors/source-trello/unit_tests/test_control_rate_limit.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
6,215
2020-09-21T13:45:56.000Z
2022-03-31T21:21:45.000Z
airbyte-integrations/connectors/source-trello/unit_tests/test_control_rate_limit.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
8,448
2020-09-21T00:43:50.000Z
2022-03-31T23:56:06.000Z
airbyte-integrations/connectors/source-trello/unit_tests/test_control_rate_limit.py
OTRI-Unipd/OTRI-airbyte
50eeeb773f75246e86c6e167b0cd7d2dda6efe0d
[ "MIT" ]
1,251
2020-09-20T05:48:47.000Z
2022-03-31T10:41:29.000Z
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # from functools import wraps import requests # Define standard timings in seconds SLEEP_ON_HIGH_LOAD: float = 9.0 TEST_DATA_FIELD = "some_data_field" TEST_RATE_LIMIT_THRESHOLD = 0.1 TEST_HEADERS_NAME = [ ("x-rate-limit-api-key-remaining", "x-rate-limit-api-key-max"), ("x-rate-limit-api-token-remaining", "x-rate-limit-api-token-max"), ] def control_request_rate_limit_decorator(threshold: float = 0.05, limit_headers=None): """ This decorator was replicated completely, as separeted function in order to be tested. The only difference is: :: the real one inside utils.py sleeps the actual defined time and returns the function back, :: and this fake one simply sleeps and returns the wait_time as actual sleep time in order to be tested. """ return decorator # Simulating real function call based CDK's parse_response() method @control_request_rate_limit_decorator(TEST_RATE_LIMIT_THRESHOLD, TEST_HEADERS_NAME) def test_with_load(requests_mock): """ Test simulates high load of rate limit. In this case we should wait at least 9 sec before next API call. """ test_response_header = { "x-rate-limit-api-token-max": "300", "x-rate-limit-api-token-remaining": "10", "x-rate-limit-api-key-max": "300", "x-rate-limit-api-key-remaining": "100", } requests_mock.get("https://test.trello.com/", headers=test_response_header) test_response = requests.get("https://test.trello.com/") actual_sleep_time = fake_parse_response(test_response) assert SLEEP_ON_HIGH_LOAD == actual_sleep_time
34.911111
116
0.668046
# # Copyright (c) 2021 Airbyte, Inc., all rights reserved. # from functools import wraps import requests # Define standard timings in seconds SLEEP_ON_HIGH_LOAD: float = 9.0 TEST_DATA_FIELD = "some_data_field" TEST_RATE_LIMIT_THRESHOLD = 0.1 TEST_HEADERS_NAME = [ ("x-rate-limit-api-key-remaining", "x-rate-limit-api-key-max"), ("x-rate-limit-api-token-remaining", "x-rate-limit-api-token-max"), ] def control_request_rate_limit_decorator(threshold: float = 0.05, limit_headers=None): """ This decorator was replicated completely, as separeted function in order to be tested. The only difference is: :: the real one inside utils.py sleeps the actual defined time and returns the function back, :: and this fake one simply sleeps and returns the wait_time as actual sleep time in order to be tested. """ def decorator(func): @wraps(func) def wrapper_control_request_rate_limit(*args, **kwargs): sleep_time = 0 free_load = float("inf") # find the Response inside args list for arg in args: response = arg if type(arg) is requests.models.Response else None # Get the rate_limits from response rate_limits = ( [ (response.headers.get(rate_remaining_limit_header), response.headers.get(rate_max_limit_header)) for rate_remaining_limit_header, rate_max_limit_header in limit_headers ] if response else None ) # define current load from rate_limits if rate_limits: for current_rate, max_rate_limit in rate_limits: free_load = min(free_load, int(current_rate) / int(max_rate_limit)) # define sleep time based on load conditions if free_load <= threshold: sleep_time = SLEEP_ON_HIGH_LOAD # for this test RETURN sleep_time based on load conditions return sleep_time return wrapper_control_request_rate_limit return decorator # Simulating real function call based CDK's parse_response() method @control_request_rate_limit_decorator(TEST_RATE_LIMIT_THRESHOLD, TEST_HEADERS_NAME) def fake_parse_response(response: requests.Response, **kwargs): json_response = response.json() records = 
json_response.get(TEST_DATA_FIELD, []) if TEST_DATA_FIELD is not None else json_response yield from records def test_with_load(requests_mock): """ Test simulates high load of rate limit. In this case we should wait at least 9 sec before next API call. """ test_response_header = { "x-rate-limit-api-token-max": "300", "x-rate-limit-api-token-remaining": "10", "x-rate-limit-api-key-max": "300", "x-rate-limit-api-key-remaining": "100", } requests_mock.get("https://test.trello.com/", headers=test_response_header) test_response = requests.get("https://test.trello.com/") actual_sleep_time = fake_parse_response(test_response) assert SLEEP_ON_HIGH_LOAD == actual_sleep_time
1,434
0
49
d9d1654b34f40e0c7abcc17dff20fa72909b9f64
8,810
py
Python
xmonitor/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py
froyobin/xmonitor
092dcaa01f834353ffd8dd3c40edf9e97543bfe8
[ "Apache-2.0" ]
null
null
null
xmonitor/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py
froyobin/xmonitor
092dcaa01f834353ffd8dd3c40edf9e97543bfe8
[ "Apache-2.0" ]
null
null
null
xmonitor/db/sqlalchemy/migrate_repo/versions/017_quote_encrypted_swift_credentials.py
froyobin/xmonitor
092dcaa01f834353ffd8dd3c40edf9e97543bfe8
[ "Apache-2.0" ]
null
null
null
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This migration handles migrating encrypted image location values from the unquoted form to the quoted form. If 'metadata_encryption_key' is specified in the config then this migration performs the following steps for every entry in the images table: 1. Decrypt the location value with the metadata_encryption_key 2. Changes the value to its quoted form 3. Encrypts the new value with the metadata_encryption_key 4. Inserts the new value back into the row Fixes bug #1081043 """ import types # noqa from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six.moves.urllib.parse as urlparse import sqlalchemy from xmonitor.common import crypt from xmonitor.common import exception from xmonitor.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'xmonitor.common.config') def migrate_location_credentials(migrate_engine, to_quoted): """ Migrate location credentials for encrypted swift uri's between the quoted and unquoted forms. :param migrate_engine: The configured db engine :param to_quoted: If True, migrate location credentials from unquoted to quoted form. If False, do the reverse. """ if not CONF.metadata_encryption_key: msg = _LI("'metadata_encryption_key' was not specified in the config" " file or a config file was not specified. 
This means that" " this migration is a NOOP.") LOG.info(msg) return meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine images_table = sqlalchemy.Table('images', meta, autoload=True) images = list(images_table.select().execute()) for image in images: try: fixed_uri = fix_uri_credentials(image['location'], to_quoted) images_table.update().where( images_table.c.id == image['id']).values( location=fixed_uri).execute() except exception.Invalid: msg = _LW("Failed to decrypt location value for image" " %(image_id)s") % {'image_id': image['id']} LOG.warn(msg) except exception.BadStoreUri as e: reason = encodeutils.exception_to_unicode(e) msg = _LE("Invalid store uri for image: %(image_id)s. " "Details: %(reason)s") % {'image_id': image.id, 'reason': reason} LOG.exception(msg) raise def fix_uri_credentials(uri, to_quoted): """ Fix the given uri's embedded credentials by round-tripping with StoreLocation. If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ if not uri: return try: decrypted_uri = decrypt_location(uri) # NOTE (ameade): If a uri is not encrypted or incorrectly encoded then we # we raise an exception. except (TypeError, ValueError) as e: raise exception.Invalid(str(e)) return legacy_parse_uri(decrypted_uri, to_quoted) def legacy_parse_uri(uri, to_quote): """ Parse URLs. This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. 
It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence of a scheme." "If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj") raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % pieces.scheme) scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') # User can be account:user, in which case cred_parts[0:2] will be # the account and user. 
Combine them into a single username of # account:user if to_quote: if len(cred_parts) == 1: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % {'creds': creds}) raise exception.BadStoreUri(message=reason) elif len(cred_parts) == 3: user = ':'.join(cred_parts[0:2]) else: user = cred_parts[0] key = cred_parts[-1] user = user key = key else: if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) raise exception.BadStoreUri(message=reason) user, key = cred_parts user = urlparse.unquote(user) key = urlparse.unquote(key) else: user = None key = None path_parts = path.split('/') try: obj = path_parts.pop() container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} raise exception.BadStoreUri(message=reason) if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = '' if user and key: if to_quote: quote_user = urlparse.quote(user) quote_key = urlparse.quote(key) else: quote_user = user quote_key = key credstring = '%s:%s@' % (quote_user, quote_key) auth_or_store_url = auth_or_store_url.strip('/') container = container.strip('/') obj = obj.strip('/') uri = '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, container, obj) return encrypt_location(uri)
36.106557
78
0.632123
# Copyright 2013 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ This migration handles migrating encrypted image location values from the unquoted form to the quoted form. If 'metadata_encryption_key' is specified in the config then this migration performs the following steps for every entry in the images table: 1. Decrypt the location value with the metadata_encryption_key 2. Changes the value to its quoted form 3. Encrypts the new value with the metadata_encryption_key 4. Inserts the new value back into the row Fixes bug #1081043 """ import types # noqa from oslo_config import cfg from oslo_log import log as logging from oslo_utils import encodeutils import six.moves.urllib.parse as urlparse import sqlalchemy from xmonitor.common import crypt from xmonitor.common import exception from xmonitor.i18n import _, _LE, _LI, _LW LOG = logging.getLogger(__name__) CONF = cfg.CONF CONF.import_opt('metadata_encryption_key', 'xmonitor.common.config') def upgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=True) def downgrade(migrate_engine): migrate_location_credentials(migrate_engine, to_quoted=False) def migrate_location_credentials(migrate_engine, to_quoted): """ Migrate location credentials for encrypted swift uri's between the quoted and unquoted forms. :param migrate_engine: The configured db engine :param to_quoted: If True, migrate location credentials from unquoted to quoted form. If False, do the reverse. 
""" if not CONF.metadata_encryption_key: msg = _LI("'metadata_encryption_key' was not specified in the config" " file or a config file was not specified. This means that" " this migration is a NOOP.") LOG.info(msg) return meta = sqlalchemy.schema.MetaData() meta.bind = migrate_engine images_table = sqlalchemy.Table('images', meta, autoload=True) images = list(images_table.select().execute()) for image in images: try: fixed_uri = fix_uri_credentials(image['location'], to_quoted) images_table.update().where( images_table.c.id == image['id']).values( location=fixed_uri).execute() except exception.Invalid: msg = _LW("Failed to decrypt location value for image" " %(image_id)s") % {'image_id': image['id']} LOG.warn(msg) except exception.BadStoreUri as e: reason = encodeutils.exception_to_unicode(e) msg = _LE("Invalid store uri for image: %(image_id)s. " "Details: %(reason)s") % {'image_id': image.id, 'reason': reason} LOG.exception(msg) raise def decrypt_location(uri): return crypt.urlsafe_decrypt(CONF.metadata_encryption_key, uri) def encrypt_location(uri): return crypt.urlsafe_encrypt(CONF.metadata_encryption_key, uri, 64) def fix_uri_credentials(uri, to_quoted): """ Fix the given uri's embedded credentials by round-tripping with StoreLocation. If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ if not uri: return try: decrypted_uri = decrypt_location(uri) # NOTE (ameade): If a uri is not encrypted or incorrectly encoded then we # we raise an exception. except (TypeError, ValueError) as e: raise exception.Invalid(str(e)) return legacy_parse_uri(decrypted_uri, to_quoted) def legacy_parse_uri(uri, to_quote): """ Parse URLs. 
This method fixes an issue where credentials specified in the URL are interpreted differently in Python 2.6.1+ than prior versions of Python. It also deals with the peculiarity that new-style Swift URIs have where a username can contain a ':', like so: swift://account:user:pass@authurl.com/container/obj If to_quoted is True, the uri is assumed to have credentials that have not been quoted, and the resulting uri will contain quoted credentials. If to_quoted is False, the uri is assumed to have credentials that have been quoted, and the resulting uri will contain credentials that have not been quoted. """ # Make sure that URIs that contain multiple schemes, such as: # swift://user:pass@http://authurl.com/v1/container/obj # are immediately rejected. if uri.count('://') != 1: reason = _("URI cannot contain more than one occurrence of a scheme." "If you have specified a URI like " "swift://user:pass@http://authurl.com/v1/container/obj" ", you need to change it to use the swift+http:// scheme, " "like so: " "swift+http://user:pass@authurl.com/v1/container/obj") raise exception.BadStoreUri(message=reason) pieces = urlparse.urlparse(uri) if pieces.scheme not in ('swift', 'swift+http', 'swift+https'): raise exception.BadStoreUri(message="Unacceptable scheme: '%s'" % pieces.scheme) scheme = pieces.scheme netloc = pieces.netloc path = pieces.path.lstrip('/') if netloc != '': # > Python 2.6.1 if '@' in netloc: creds, netloc = netloc.split('@') else: creds = None else: # Python 2.6.1 compat # see lp659445 and Python issue7904 if '@' in path: creds, path = path.split('@') else: creds = None netloc = path[0:path.find('/')].strip('/') path = path[path.find('/'):].strip('/') if creds: cred_parts = creds.split(':') # User can be account:user, in which case cred_parts[0:2] will be # the account and user. 
Combine them into a single username of # account:user if to_quote: if len(cred_parts) == 1: reason = (_("Badly formed credentials '%(creds)s' in Swift " "URI") % {'creds': creds}) raise exception.BadStoreUri(message=reason) elif len(cred_parts) == 3: user = ':'.join(cred_parts[0:2]) else: user = cred_parts[0] key = cred_parts[-1] user = user key = key else: if len(cred_parts) != 2: reason = (_("Badly formed credentials in Swift URI.")) raise exception.BadStoreUri(message=reason) user, key = cred_parts user = urlparse.unquote(user) key = urlparse.unquote(key) else: user = None key = None path_parts = path.split('/') try: obj = path_parts.pop() container = path_parts.pop() if not netloc.startswith('http'): # push hostname back into the remaining to build full authurl path_parts.insert(0, netloc) auth_or_store_url = '/'.join(path_parts) except IndexError: reason = _("Badly formed S3 URI: %(uri)s") % {'uri': uri} raise exception.BadStoreUri(message=reason) if auth_or_store_url.startswith('http://'): auth_or_store_url = auth_or_store_url[len('http://'):] elif auth_or_store_url.startswith('https://'): auth_or_store_url = auth_or_store_url[len('https://'):] credstring = '' if user and key: if to_quote: quote_user = urlparse.quote(user) quote_key = urlparse.quote(key) else: quote_user = user quote_key = key credstring = '%s:%s@' % (quote_user, quote_key) auth_or_store_url = auth_or_store_url.strip('/') container = container.strip('/') obj = obj.strip('/') uri = '%s://%s%s/%s/%s' % (scheme, credstring, auth_or_store_url, container, obj) return encrypt_location(uri)
297
0
92
54fcba5a90b30f291e1a2fab18963885b8029fe5
1,885
py
Python
cm/cm_dhexppot/CM/CM_TUW19/array2raster.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
cm/cm_dhexppot/CM/CM_TUW19/array2raster.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
cm/cm_dhexppot/CM/CM_TUW19/array2raster.py
historeno/enermaps
ad3a97636baa153a56367e374d0fef7f009bf19d
[ "Apache-2.0" ]
null
null
null
""" Created on August 14 2018 @author: fallahnejad@eeg.tuwien.ac.at """ import os import sys from osgeo import gdal, osr path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if path not in sys.path: sys.path.append(path) def array2raster( outRasterPath, geo_transform, dataType, array, noDataValue=0, OutputRasterSRS=3035 ): """ This function rasterizes the input numpy array. The input array and the geo_transform must be given for EPSG3035. """ # conversion of data types from numpy to gdal dict_varTyp = { "int8": gdal.GDT_Byte, "int16": gdal.GDT_Int16, "int32": gdal.GDT_Int32, "uint16": gdal.GDT_UInt16, "uint32": gdal.GDT_UInt32, "float32": gdal.GDT_Float32, "float64": gdal.GDT_Float64, } cols = array.shape[1] rows = array.shape[0] driver = gdal.GetDriverByName("GTiff") outRaster = driver.Create( outRasterPath, cols, rows, 1, dict_varTyp[dataType], ["compress=DEFLATE", "TILED=YES", "TFW=YES", "ZLEVEL=9", "PREDICTOR=1"], ) outRaster.SetGeoTransform(geo_transform) outRasterSRS = osr.SpatialReference() outRasterSRS.ImportFromEPSG(OutputRasterSRS) outRaster.SetProjection(outRasterSRS.ExportToWkt()) outRaster.GetRasterBand(1).SetNoDataValue(noDataValue) if dataType == "int8" or dataType == "uint16": # This can be used for dtype int8 ct = gdal.ColorTable() ct.SetColorEntry(noDataValue, (0, 0, 0, 255)) ct.SetColorEntry(1, (250, 159, 181, 255)) """ for i in range(1, 1+np.max(array)): ct.SetColorEntry(i, tuple(np.random.choice(range(256), size=4))) """ outRaster.GetRasterBand(1).SetColorTable(ct) outRaster.GetRasterBand(1).WriteArray(array) outRaster.FlushCache()
29.920635
86
0.646154
""" Created on August 14 2018 @author: fallahnejad@eeg.tuwien.ac.at """ import os import sys from osgeo import gdal, osr path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) if path not in sys.path: sys.path.append(path) def array2raster( outRasterPath, geo_transform, dataType, array, noDataValue=0, OutputRasterSRS=3035 ): """ This function rasterizes the input numpy array. The input array and the geo_transform must be given for EPSG3035. """ # conversion of data types from numpy to gdal dict_varTyp = { "int8": gdal.GDT_Byte, "int16": gdal.GDT_Int16, "int32": gdal.GDT_Int32, "uint16": gdal.GDT_UInt16, "uint32": gdal.GDT_UInt32, "float32": gdal.GDT_Float32, "float64": gdal.GDT_Float64, } cols = array.shape[1] rows = array.shape[0] driver = gdal.GetDriverByName("GTiff") outRaster = driver.Create( outRasterPath, cols, rows, 1, dict_varTyp[dataType], ["compress=DEFLATE", "TILED=YES", "TFW=YES", "ZLEVEL=9", "PREDICTOR=1"], ) outRaster.SetGeoTransform(geo_transform) outRasterSRS = osr.SpatialReference() outRasterSRS.ImportFromEPSG(OutputRasterSRS) outRaster.SetProjection(outRasterSRS.ExportToWkt()) outRaster.GetRasterBand(1).SetNoDataValue(noDataValue) if dataType == "int8" or dataType == "uint16": # This can be used for dtype int8 ct = gdal.ColorTable() ct.SetColorEntry(noDataValue, (0, 0, 0, 255)) ct.SetColorEntry(1, (250, 159, 181, 255)) """ for i in range(1, 1+np.max(array)): ct.SetColorEntry(i, tuple(np.random.choice(range(256), size=4))) """ outRaster.GetRasterBand(1).SetColorTable(ct) outRaster.GetRasterBand(1).WriteArray(array) outRaster.FlushCache()
0
0
0
9ba66ac0118865fada74771be7bf4006b25265ce
5,286
py
Python
util/scripts/stack-depth-info.py
systems-nuts/popcorn-compiler-alpine
5c225c7d3055db83dc654b6b5525c34bbdacded1
[ "Linux-OpenIB" ]
30
2019-04-07T14:58:31.000Z
2021-05-24T19:07:20.000Z
util/scripts/stack-depth-info.py
XRDevIEEE/popcorn-compiler
2cb2eccc0c75b5963d9fec26ad80a7b881316b1d
[ "Linux-OpenIB" ]
11
2018-07-24T19:31:26.000Z
2020-09-03T08:56:23.000Z
util/scripts/stack-depth-info.py
XRDevIEEE/popcorn-compiler
2cb2eccc0c75b5963d9fec26ad80a7b881316b1d
[ "Linux-OpenIB" ]
17
2018-08-26T12:43:15.000Z
2022-03-18T12:08:40.000Z
#!/usr/bin/python3 import sys, subprocess ############################################################################### # Config ############################################################################### dataFile = None binFile = None onlyFunc = False verbose = False ############################################################################### # Utility functions ############################################################################### ############################################################################### # Driver ############################################################################### skip = False for i in range(len(sys.argv)): if skip: skip = False continue elif sys.argv[i] == "-h" or sys.argv[i] == "--help": printHelp() sys.exit(0) continue elif sys.argv[i] == "-d": dataFile = sys.argv[i+1] skip = True continue elif sys.argv[i] == "-b": binFile = sys.argv[i+1] skip = True continue elif sys.argv[i] == "-f": onlyFunc = True continue elif sys.argv[i] == "-v": verbose = True continue if dataFile == None: print("Please supply a data file!") printHelp() sys.exit(1) if onlyFunc and binFile == None: print("Please supply a binary to print function names!") printHelp() sys.exit(1) avgDepth, maxDepth, funcCalls = parseData(dataFile) if binFile == None: printRaw(dataFile, avgDepth, maxDepth, funcCalls) else: symbols = getSymbolTable(binFile) printDetailed(dataFile, binFile, symbols, avgDepth, maxDepth, funcCalls)
30.732558
104
0.574915
#!/usr/bin/python3 import sys, subprocess ############################################################################### # Config ############################################################################### dataFile = None binFile = None onlyFunc = False verbose = False ############################################################################### # Utility functions ############################################################################### def printHelp(): print("stack-depth-info.py: parse stack depth data and summarize information\n") print("Usage: ./stack-depth-info.py -d file [ OPTIONS ]") print("Options:") print(" -h / --help : print help & exit") print(" -d file : stack depth file dumped by library (usually stack_depth.dat)") print(" -b file : binary from which data was dumped, gives more detailed information") print(" -f : only print names of functions who called the stack depth library (requires -b)") print(" -v : verbose output, prints caller information") def parseData(fileName): numCalls = 0 avgDepth = 0.0 maxDepth = (0, 0, 0) funcCalls = [] # Read in raw data, which is of the format: # (<function>, <# of calls>, <average depth>, (max depth caller, max depth), [<callers>]) fp = open(fileName, 'r') for line in fp: tup = eval(line.strip()) numCalls += tup[1] avgDepth += tup[1] * tup[2] if tup[3][1] > maxDepth[2]: maxDepth = (tup[0], tup[3][0], tup[3][1]) funcCalls.append(tup) fp.close() avgDepth /= float(numCalls) return avgDepth, maxDepth, sorted(funcCalls, key=lambda func: func[1], reverse=True) def printRaw(dataFile, avgDepth, maxDepth, funcCalls): global verbose print("Data from " + dataFile) print("Average depth: {:4.3f}".format(avgDepth)) print("Max depth: " + str(maxDepth[2]) + ", " + hex(maxDepth[0]) + " called by " + hex(maxDepth[1])) print() print("{0:<14s} {1:>12s} {2:>12s}".format("Function:", "Num Calls", "Avg. 
Depth")) for val in funcCalls: print("0x{0:<12x} {1:>12d} {2:>12.3f}".format(val[0], val[1], val[2])) if verbose: for val in funcCalls: print("\n\r" + hex(val[0]) + " called by:") callers = sorted(val[4], key=lambda func: func[1], reverse=True) for caller in callers: print(" 0x{0:<x}: {1:d} time(s)".format(caller[0], caller[1])) def getSymbolTable(binFile): symbols = {} out = subprocess.check_output(["readelf", "-sW", binFile]) outlines = out.decode("utf-8").split("\n") for line in outlines: if "Symbol table" in line or "Num:" in line: continue else: toks = line.strip().split() if len(toks) < 8: continue startAddr = int(toks[1], base=16) if startAddr == 0: # Skip undefined symbols continue if "x" in toks[2]: size = int(toks[2], base=16) else: size = int(toks[2]) if size == 0: # For dynamically loaded symbols size = 1 name = toks[7].split("@")[0] symbols[name] = (startAddr, size) return symbols def printDetailed(dataFile, binFile, symbols, avgDepth, maxDepth, funcCalls): global onlyFunc global verbose def getSymbol(symbols, addr): for sym in symbols: endAddr = symbols[sym][0] + symbols[sym][1] if symbols[sym][0] <= addr and addr < endAddr: return sym return "(n/a)" if onlyFunc: for val in funcCalls: sym = getSymbol(symbols, val[0]) print(sym) else: print("Data from " + dataFile + ", generated by " + binFile) print("Average depth: {:4.3f}".format(avgDepth)) print("Max depth: " + str(maxDepth[2]) + ", " + \ getSymbol(symbols, maxDepth[0]) + " (" + hex(maxDepth[0]) + ") called by " + \ getSymbol(symbols, maxDepth[1]) + " (" + hex(maxDepth[1]) + ")") print() print("{0:<55s} {1:>12s} {2:>12s}".format("Function:", "Num Calls", "Avg. 
Depth")) for val in funcCalls: sym = getSymbol(symbols, val[0]) print("{0:<55s} {1:>12d} {2:>12.3f}".format(sym + " (" + hex(val[0]) + ")", val[1], val[2])) if verbose: for val in funcCalls: print("\n\r" + getSymbol(symbols, val[0]) + " called by:") callers = sorted(val[4], key=lambda func: func[1], reverse=True) for caller in callers: sym = getSymbol(symbols, caller[0]) print(" {0:s}: {1:d} time(s)".format(sym + " (" + hex(caller[0]) + ")", caller[1])) ############################################################################### # Driver ############################################################################### skip = False for i in range(len(sys.argv)): if skip: skip = False continue elif sys.argv[i] == "-h" or sys.argv[i] == "--help": printHelp() sys.exit(0) continue elif sys.argv[i] == "-d": dataFile = sys.argv[i+1] skip = True continue elif sys.argv[i] == "-b": binFile = sys.argv[i+1] skip = True continue elif sys.argv[i] == "-f": onlyFunc = True continue elif sys.argv[i] == "-v": verbose = True continue if dataFile == None: print("Please supply a data file!") printHelp() sys.exit(1) if onlyFunc and binFile == None: print("Please supply a binary to print function names!") printHelp() sys.exit(1) avgDepth, maxDepth, funcCalls = parseData(dataFile) if binFile == None: printRaw(dataFile, avgDepth, maxDepth, funcCalls) else: symbols = getSymbolTable(binFile) printDetailed(dataFile, binFile, symbols, avgDepth, maxDepth, funcCalls)
3,657
0
115
d6d71f011810b5ff2754627d1771f5d19d8fd950
178
py
Python
examples/get_user_info.py
suyashjawale/InstagramCLI
56b7f8e1f97ad723c41a15874d5ab5c8967d08c3
[ "MIT" ]
5
2022-01-26T00:15:52.000Z
2022-03-16T17:18:29.000Z
examples/get_user_info.py
suyashjawale/InstagramCLI
56b7f8e1f97ad723c41a15874d5ab5c8967d08c3
[ "MIT" ]
1
2021-11-03T17:26:11.000Z
2021-11-14T05:06:20.000Z
examples/get_user_info.py
suyashjawale/InstagramCLI
56b7f8e1f97ad723c41a15874d5ab5c8967d08c3
[ "MIT" ]
1
2022-01-02T03:50:29.000Z
2022-01-02T03:50:29.000Z
from InstagramCLI import InstagramCLI cli = InstagramCLI(username="", password="") data= cli.get_user_info(target_username="usnjsi77",save_to_device=True) print(data) cli.close()
35.6
71
0.803371
from InstagramCLI import InstagramCLI cli = InstagramCLI(username="", password="") data= cli.get_user_info(target_username="usnjsi77",save_to_device=True) print(data) cli.close()
0
0
0
6a8f79a8e7bb2e7ba26110a94905fe82627d84c0
8,592
py
Python
pymatgen/analysis/molecule_structure_comparator.py
exenGT/pymatgen
a8ffb820ab8fc3f60251099e38c8888f45eae618
[ "MIT" ]
1
2022-03-22T22:08:16.000Z
2022-03-22T22:08:16.000Z
pymatgen/analysis/molecule_structure_comparator.py
exenGT/pymatgen
a8ffb820ab8fc3f60251099e38c8888f45eae618
[ "MIT" ]
null
null
null
pymatgen/analysis/molecule_structure_comparator.py
exenGT/pymatgen
a8ffb820ab8fc3f60251099e38c8888f45eae618
[ "MIT" ]
null
null
null
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module provides classes to comparsion the structures of the two molecule. As long as the two molecule have the same bond connection tables, the molecules are deemed to be same. The atom in the two molecule must be paired accordingly. This module is supposed to perform rough comparisons with the atom order correspondence prerequisite, while molecule_matcher is supposed to do exact comparisons without the atom order correspondence prerequisite. """ import itertools from monty.json import MSONable __author__ = "Xiaohui Qu" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "1.0" __maintainer__ = "Xiaohui Qu" __email__ = "xhqu1981@gmail.com" __status__ = "Experimental" __date__ = "Jan 22, 2014" class CovalentRadius: """ Covalent Radius of the elements. Beatriz C. et al. Dalton Trans. 2008, 2832-2838. DOI: 10.1039/b801115j """ radius = { "H": 0.31, "He": 0.28, "Li": 1.28, "Be": 0.96, "B": 0.84, "C": 0.73, "N": 0.71, "O": 0.66, "F": 0.57, "Ne": 0.58, "Na": 1.66, "Mg": 1.41, "Al": 1.21, "Si": 1.11, "P": 1.07, "S": 1.05, "Cl": 1.02, "Ar": 1.06, "K": 2.03, "Ca": 1.76, "Sc": 1.70, "Ti": 1.60, "V": 1.53, "Cr": 1.39, "Mn": 1.50, "Fe": 1.42, "Co": 1.38, "Ni": 1.24, "Cu": 1.32, "Zn": 1.22, "Ga": 1.22, "Ge": 1.20, "As": 1.19, "Se": 1.20, "Br": 1.20, "Kr": 1.16, "Rb": 2.20, "Sr": 1.95, "Y": 1.90, "Zr": 1.75, "Nb": 1.64, "Mo": 1.54, "Tc": 1.47, "Ru": 1.46, "Rh": 1.42, "Pd": 1.39, "Ag": 1.45, "Cd": 1.44, "In": 1.42, "Sn": 1.39, "Sb": 1.39, "Te": 1.38, "I": 1.39, "Xe": 1.40, "Cs": 2.44, "Ba": 2.15, "La": 2.07, "Ce": 2.04, "Pr": 2.03, "Nd": 2.01, "Pm": 1.99, "Sm": 1.98, "Eu": 1.98, "Gd": 1.96, "Tb": 1.94, "Dy": 1.92, "Ho": 1.92, "Er": 1.89, "Tm": 1.90, "Yb": 1.87, "Lu": 1.87, "Hf": 1.75, "Ta": 1.70, "W": 1.62, "Re": 1.51, "Os": 1.44, "Ir": 1.41, "Pt": 1.36, "Au": 1.36, "Hg": 1.32, "Tl": 1.45, "Pb": 1.46, "Bi": 1.48, "Po": 1.40, "At": 1.50, "Rn": 1.50, "Fr": 2.60, 
"Ra": 2.21, "Ac": 2.15, "Th": 2.06, "Pa": 2.00, "U": 1.96, "Np": 1.90, "Pu": 1.87, "Am": 1.80, "Cm": 1.69, } class MoleculeStructureComparator(MSONable): """ Class to check whether the connection tables of the two molecules are the same. The atom in the two molecule must be paired accordingly. """ ionic_element_list = [ "Na", "Mg", "Al", "Sc", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Rb", "Sr", ] halogen_list = ["F", "Cl", "Br", "I"] def __init__( self, bond_length_cap=0.3, covalent_radius=CovalentRadius.radius, priority_bonds=(), priority_cap=0.8, ignore_ionic_bond=True, bond_13_cap=0.05, ): """ Args: bond_length_cap: The ratio of the elongation of the bond to be acknowledged. If the distance between two atoms is less than ( empirical covalent bond length) X (1 + bond_length_cap), the bond between the two atoms will be acknowledged. covalent_radius: The covalent radius of the atoms. dict (element symbol -> radius) priority_bonds: The bonds that are known to be existed in the initial molecule. Such bonds will be acknowledged in a loose criteria. The index should start from 0. priority_cap: The ratio of the elongation of the bond to be acknowledged for the priority bonds. """ self.bond_length_cap = bond_length_cap self.covalent_radius = covalent_radius self.priority_bonds = [tuple(sorted(b)) for b in priority_bonds] self.priority_cap = priority_cap self.ignore_ionic_bond = ignore_ionic_bond self.ignore_halogen_self_bond = True self.bond_13_cap = bond_13_cap def are_equal(self, mol1, mol2): """ Compare the bond table of the two molecules. Args: mol1: first molecule. pymatgen Molecule object. mol2: second moleculs. pymatgen Molecule objec. 
""" b1 = set(self._get_bonds(mol1)) b2 = set(self._get_bonds(mol2)) return b1 == b2 @staticmethod def get_13_bonds(priority_bonds): """ Args: priority_bonds (): Returns: """ all_bond_pairs = list(itertools.combinations(priority_bonds, r=2)) all_2_bond_atoms = [set(b1 + b2) for b1, b2 in all_bond_pairs] all_13_bond_atoms = [a for a in all_2_bond_atoms if len(a) == 3] all_2_and_13_bonds = { tuple(sorted(b)) for b in itertools.chain(*(itertools.combinations(p, 2) for p in all_13_bond_atoms)) } bonds_13 = all_2_and_13_bonds - {tuple(b) for b in priority_bonds} return tuple(sorted(bonds_13)) def _get_bonds(self, mol): """ Find all the bond in a molcule Args: mol: the molecule. pymatgen Molecule object Returns: List of tuple. Each tuple correspond to a bond represented by the id of the two end atoms. """ num_atoms = len(mol) # index starting from 0 if self.ignore_ionic_bond: covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list] else: covalent_atoms = list(range(num_atoms)) all_pairs = list(itertools.combinations(covalent_atoms, 2)) pair_dists = [mol.get_distance(*p) for p in all_pairs] elements = mol.composition.as_dict().keys() unavailable_elements = list(set(elements) - set(self.covalent_radius.keys())) if len(unavailable_elements) > 0: raise ValueError(f"The covalent radius for element {unavailable_elements} is not available") bond_13 = self.get_13_bonds(self.priority_bonds) max_length = [ (self.covalent_radius[mol.sites[p[0]].specie.symbol] + self.covalent_radius[mol.sites[p[1]].specie.symbol]) * ( 1 + ( self.priority_cap if p in self.priority_bonds else (self.bond_length_cap if p not in bond_13 else self.bond_13_cap) ) ) * ( 0.1 if ( self.ignore_halogen_self_bond and p not in self.priority_bonds and mol.sites[p[0]].specie.symbol in self.halogen_list and mol.sites[p[1]].specie.symbol in self.halogen_list ) else 1.0 ) for p in all_pairs ] bonds = [bond for bond, dist, cap in zip(all_pairs, pair_dists, max_length) if 
dist <= cap] return bonds def as_dict(self): """ Returns: MSONable dict """ return { "version": __version__, "@module": self.__class__.__module__, "@class": self.__class__.__name__, "bond_length_cap": self.bond_length_cap, "covalent_radius": self.covalent_radius, "priority_bonds": self.priority_bonds, "priority_cap": self.priority_cap, } @classmethod def from_dict(cls, d): """ Args: d (dict): Dict representation Returns: MoleculeStructureComparator """ return MoleculeStructureComparator( bond_length_cap=d["bond_length_cap"], covalent_radius=d["covalent_radius"], priority_bonds=d["priority_bonds"], priority_cap=d["priority_cap"], )
28.54485
119
0.516527
# Copyright (c) Pymatgen Development Team. # Distributed under the terms of the MIT License. """ This module provides classes to comparsion the structures of the two molecule. As long as the two molecule have the same bond connection tables, the molecules are deemed to be same. The atom in the two molecule must be paired accordingly. This module is supposed to perform rough comparisons with the atom order correspondence prerequisite, while molecule_matcher is supposed to do exact comparisons without the atom order correspondence prerequisite. """ import itertools from monty.json import MSONable __author__ = "Xiaohui Qu" __copyright__ = "Copyright 2011, The Materials Project" __version__ = "1.0" __maintainer__ = "Xiaohui Qu" __email__ = "xhqu1981@gmail.com" __status__ = "Experimental" __date__ = "Jan 22, 2014" class CovalentRadius: """ Covalent Radius of the elements. Beatriz C. et al. Dalton Trans. 2008, 2832-2838. DOI: 10.1039/b801115j """ radius = { "H": 0.31, "He": 0.28, "Li": 1.28, "Be": 0.96, "B": 0.84, "C": 0.73, "N": 0.71, "O": 0.66, "F": 0.57, "Ne": 0.58, "Na": 1.66, "Mg": 1.41, "Al": 1.21, "Si": 1.11, "P": 1.07, "S": 1.05, "Cl": 1.02, "Ar": 1.06, "K": 2.03, "Ca": 1.76, "Sc": 1.70, "Ti": 1.60, "V": 1.53, "Cr": 1.39, "Mn": 1.50, "Fe": 1.42, "Co": 1.38, "Ni": 1.24, "Cu": 1.32, "Zn": 1.22, "Ga": 1.22, "Ge": 1.20, "As": 1.19, "Se": 1.20, "Br": 1.20, "Kr": 1.16, "Rb": 2.20, "Sr": 1.95, "Y": 1.90, "Zr": 1.75, "Nb": 1.64, "Mo": 1.54, "Tc": 1.47, "Ru": 1.46, "Rh": 1.42, "Pd": 1.39, "Ag": 1.45, "Cd": 1.44, "In": 1.42, "Sn": 1.39, "Sb": 1.39, "Te": 1.38, "I": 1.39, "Xe": 1.40, "Cs": 2.44, "Ba": 2.15, "La": 2.07, "Ce": 2.04, "Pr": 2.03, "Nd": 2.01, "Pm": 1.99, "Sm": 1.98, "Eu": 1.98, "Gd": 1.96, "Tb": 1.94, "Dy": 1.92, "Ho": 1.92, "Er": 1.89, "Tm": 1.90, "Yb": 1.87, "Lu": 1.87, "Hf": 1.75, "Ta": 1.70, "W": 1.62, "Re": 1.51, "Os": 1.44, "Ir": 1.41, "Pt": 1.36, "Au": 1.36, "Hg": 1.32, "Tl": 1.45, "Pb": 1.46, "Bi": 1.48, "Po": 1.40, "At": 1.50, "Rn": 1.50, "Fr": 2.60, 
"Ra": 2.21, "Ac": 2.15, "Th": 2.06, "Pa": 2.00, "U": 1.96, "Np": 1.90, "Pu": 1.87, "Am": 1.80, "Cm": 1.69, } class MoleculeStructureComparator(MSONable): """ Class to check whether the connection tables of the two molecules are the same. The atom in the two molecule must be paired accordingly. """ ionic_element_list = [ "Na", "Mg", "Al", "Sc", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn", "Ga", "Rb", "Sr", ] halogen_list = ["F", "Cl", "Br", "I"] def __init__( self, bond_length_cap=0.3, covalent_radius=CovalentRadius.radius, priority_bonds=(), priority_cap=0.8, ignore_ionic_bond=True, bond_13_cap=0.05, ): """ Args: bond_length_cap: The ratio of the elongation of the bond to be acknowledged. If the distance between two atoms is less than ( empirical covalent bond length) X (1 + bond_length_cap), the bond between the two atoms will be acknowledged. covalent_radius: The covalent radius of the atoms. dict (element symbol -> radius) priority_bonds: The bonds that are known to be existed in the initial molecule. Such bonds will be acknowledged in a loose criteria. The index should start from 0. priority_cap: The ratio of the elongation of the bond to be acknowledged for the priority bonds. """ self.bond_length_cap = bond_length_cap self.covalent_radius = covalent_radius self.priority_bonds = [tuple(sorted(b)) for b in priority_bonds] self.priority_cap = priority_cap self.ignore_ionic_bond = ignore_ionic_bond self.ignore_halogen_self_bond = True self.bond_13_cap = bond_13_cap def are_equal(self, mol1, mol2): """ Compare the bond table of the two molecules. Args: mol1: first molecule. pymatgen Molecule object. mol2: second moleculs. pymatgen Molecule objec. 
""" b1 = set(self._get_bonds(mol1)) b2 = set(self._get_bonds(mol2)) return b1 == b2 @staticmethod def get_13_bonds(priority_bonds): """ Args: priority_bonds (): Returns: """ all_bond_pairs = list(itertools.combinations(priority_bonds, r=2)) all_2_bond_atoms = [set(b1 + b2) for b1, b2 in all_bond_pairs] all_13_bond_atoms = [a for a in all_2_bond_atoms if len(a) == 3] all_2_and_13_bonds = { tuple(sorted(b)) for b in itertools.chain(*(itertools.combinations(p, 2) for p in all_13_bond_atoms)) } bonds_13 = all_2_and_13_bonds - {tuple(b) for b in priority_bonds} return tuple(sorted(bonds_13)) def _get_bonds(self, mol): """ Find all the bond in a molcule Args: mol: the molecule. pymatgen Molecule object Returns: List of tuple. Each tuple correspond to a bond represented by the id of the two end atoms. """ num_atoms = len(mol) # index starting from 0 if self.ignore_ionic_bond: covalent_atoms = [i for i in range(num_atoms) if mol.species[i].symbol not in self.ionic_element_list] else: covalent_atoms = list(range(num_atoms)) all_pairs = list(itertools.combinations(covalent_atoms, 2)) pair_dists = [mol.get_distance(*p) for p in all_pairs] elements = mol.composition.as_dict().keys() unavailable_elements = list(set(elements) - set(self.covalent_radius.keys())) if len(unavailable_elements) > 0: raise ValueError(f"The covalent radius for element {unavailable_elements} is not available") bond_13 = self.get_13_bonds(self.priority_bonds) max_length = [ (self.covalent_radius[mol.sites[p[0]].specie.symbol] + self.covalent_radius[mol.sites[p[1]].specie.symbol]) * ( 1 + ( self.priority_cap if p in self.priority_bonds else (self.bond_length_cap if p not in bond_13 else self.bond_13_cap) ) ) * ( 0.1 if ( self.ignore_halogen_self_bond and p not in self.priority_bonds and mol.sites[p[0]].specie.symbol in self.halogen_list and mol.sites[p[1]].specie.symbol in self.halogen_list ) else 1.0 ) for p in all_pairs ] bonds = [bond for bond, dist, cap in zip(all_pairs, pair_dists, max_length) if 
dist <= cap] return bonds def as_dict(self): """ Returns: MSONable dict """ return { "version": __version__, "@module": self.__class__.__module__, "@class": self.__class__.__name__, "bond_length_cap": self.bond_length_cap, "covalent_radius": self.covalent_radius, "priority_bonds": self.priority_bonds, "priority_cap": self.priority_cap, } @classmethod def from_dict(cls, d): """ Args: d (dict): Dict representation Returns: MoleculeStructureComparator """ return MoleculeStructureComparator( bond_length_cap=d["bond_length_cap"], covalent_radius=d["covalent_radius"], priority_bonds=d["priority_bonds"], priority_cap=d["priority_cap"], )
0
0
0
f836aea2e60665ebbee4033654498c2a4b470d74
1,858
py
Python
ThenWhatTree/execute_tree.py
intel/ThenWhatTree
ddcd5af58e55705ba2dd6a824828f3fc48bd68da
[ "BSD-3-Clause" ]
3
2018-05-23T17:15:39.000Z
2019-03-17T18:44:19.000Z
ThenWhatTree/execute_tree.py
intel/ThenWhatTree
ddcd5af58e55705ba2dd6a824828f3fc48bd68da
[ "BSD-3-Clause" ]
null
null
null
ThenWhatTree/execute_tree.py
intel/ThenWhatTree
ddcd5af58e55705ba2dd6a824828f3fc48bd68da
[ "BSD-3-Clause" ]
4
2019-11-02T01:15:21.000Z
2021-09-16T11:01:29.000Z
#!/usr/bin/env python3 # Copyright (C) 2018 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause """Function for evaluating the nodes of a decision tree and extracting out the results from each of the nodes.""" # Import built in modules import argparse import sys import os # Import 3rd party modules # Import local modules from ThenWhatTree import _get_file_type from ThenWhatTree import evaluate from ThenWhatTree import extract # Module authorship metadata __author__ = "Erik W Berg" __copyright__ = "Copyright 2018, Intel Corporation" __credits__ = [""] __license__ = "BSD-3-Clause" __version__ = "1.0" __maintainer__ = "Erik W Berg" __email__ = "" __status__ = "Production" # Prototype, Development, Production # Code starts here def parse_args(): ''' Function to parse the cmdline, check that required args are present and that the file types are correct :return: ''' parser = argparse.ArgumentParser() parser.add_argument('--xml', help='path to xml file defining the decision tree', nargs='?', type=str) args = parser.parse_args() check_cmd_line_args(args) check_xml_file_type(args) return args def check_cmd_line_args(args): ''' Raise an exception if an XML file was not passed in on the cmd line. :param args: :return: none ''' if not args.xml: raise Exception('--xml switch must be populated') def check_xml_file_type(args): ''' Raise an exception if the cmd line switch does not match the type of the passed file :param args: :return: none ''' if 'XML' not in _get_file_type(args.xml): raise Exception(sys.argv[1] + ' is not a XML file') if __name__ == '__main__': args = parse_args() os.environ['PYTHONDONTWRITEBYTECODE'] = 'TRUE' tree_object = evaluate(args.xml) print(extract(tree_object.getroot()))
25.108108
113
0.703983
#!/usr/bin/env python3 # Copyright (C) 2018 Intel Corporation # SPDX-License-Identifier: BSD-3-Clause """Function for evaluating the nodes of a decision tree and extracting out the results from each of the nodes.""" # Import built in modules import argparse import sys import os # Import 3rd party modules # Import local modules from ThenWhatTree import _get_file_type from ThenWhatTree import evaluate from ThenWhatTree import extract # Module authorship metadata __author__ = "Erik W Berg" __copyright__ = "Copyright 2018, Intel Corporation" __credits__ = [""] __license__ = "BSD-3-Clause" __version__ = "1.0" __maintainer__ = "Erik W Berg" __email__ = "" __status__ = "Production" # Prototype, Development, Production # Code starts here def parse_args(): ''' Function to parse the cmdline, check that required args are present and that the file types are correct :return: ''' parser = argparse.ArgumentParser() parser.add_argument('--xml', help='path to xml file defining the decision tree', nargs='?', type=str) args = parser.parse_args() check_cmd_line_args(args) check_xml_file_type(args) return args def check_cmd_line_args(args): ''' Raise an exception if an XML file was not passed in on the cmd line. :param args: :return: none ''' if not args.xml: raise Exception('--xml switch must be populated') def check_xml_file_type(args): ''' Raise an exception if the cmd line switch does not match the type of the passed file :param args: :return: none ''' if 'XML' not in _get_file_type(args.xml): raise Exception(sys.argv[1] + ' is not a XML file') if __name__ == '__main__': args = parse_args() os.environ['PYTHONDONTWRITEBYTECODE'] = 'TRUE' tree_object = evaluate(args.xml) print(extract(tree_object.getroot()))
0
0
0
5e9d965ec0e648c42b7858dcd55dc6a562d14c96
140
py
Python
DOM/game/item.py
AlexDev-py/CubIC
7932d2789c0b45ebc9ce631d21f6bed99d3a3a51
[ "MIT" ]
2
2022-02-05T13:06:28.000Z
2022-02-09T07:07:11.000Z
DOM/game/item.py
AlexDev-py/CubIC
7932d2789c0b45ebc9ce631d21f6bed99d3a3a51
[ "MIT" ]
null
null
null
DOM/game/item.py
AlexDev-py/CubIC
7932d2789c0b45ebc9ce631d21f6bed99d3a3a51
[ "MIT" ]
2
2022-01-24T13:42:45.000Z
2022-02-08T09:18:58.000Z
from dataclasses import dataclass @dataclass
12.727273
33
0.642857
from dataclasses import dataclass @dataclass class Item: name: str lvl: int price: int icon: str desc: dict[str, str]
0
71
22
2773a2b5fb1ba7a66e740bb2804aab93cde6bb13
10,125
py
Python
sfw_torch.py
bwilder0/tb_code_release
e0994116cff1ec0c04e0297f2b558ec5dd53f765
[ "MIT" ]
null
null
null
sfw_torch.py
bwilder0/tb_code_release
e0994116cff1ec0c04e0297f2b558ec5dd53f765
[ "MIT" ]
null
null
null
sfw_torch.py
bwilder0/tb_code_release
e0994116cff1ec0c04e0297f2b558ec5dd53f765
[ "MIT" ]
null
null
null
import torch import numpy as np def run_sis(T, G, S, I, newI, nu, mu, d, beta, N): ''' Runs the linearized SIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) #run the main loop for the linearized disease dynamics total_infected = I.mean(dim=0).sum() for t in range(1, T): new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] old_infections = nu @ d A = G @ (new_infections + old_infections) I = A @ I I = I + newI[t] total_infected += I.mean(dim=0).sum() return total_infected def run_seis(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) E = E.expand_as(beta) alpha_fast = torch.diag(alpha_fast).expand_as(beta) alpha_slow = alpha_slow.expand_as(beta) #run the main loop for the linearized disease dynamics total_infected = I.mean(dim=0).sum() import numpy as np infections_time = np.zeros((T, 100)) for t in range(1, T): new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I # print(new_infections.shape) # print(alpha_fast.shape) new_infections_active = alpha_fast @ new_infections new_infections_latent = new_infections - new_infections_active E = mu[t-1] @ E activations = alpha_slow*E E = (1 - alpha_slow)*E # print(E[:, 0].shape) # print(new_infections_latent.squeeze().shape) E[:, 0] += new_infections_latent.squeeze() E = G @ E @ G old_infections = nu @ d @ I # print(new_infections_active.shape) # print(old_infections.shape) # print(newI[t].shape) # print(activations.sum(dim=2).shape) # print(I.shape) I = new_infections_active + old_infections + newI[t] + 
activations.sum(dim=2).view_as(I) I = G @ I # infections_time.append(I.mean(dim=0).sum().item()) for j in range(100): total_pop = (1./torch.diag(N[t, j])).sum() infections_time[t, j] = I[j].sum()/total_pop if t == 1 and j == 0: print(total_pop.item(), I[j].sum().item()) # total_infected += I.mean(dim=0).sum() return infections_time def run_seis_information(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size informed = nu.view(len(nu), 1) informed = informed.expand(beta.shape[0], *informed.shape) nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) E = E.expand_as(beta) alpha_fast = torch.diag(alpha_fast).expand_as(beta) alpha_slow = alpha_slow.expand_as(beta) #keep track of infected, latent, and informed at each time step all_infections = torch.zeros(T, beta.shape[1], 1) all_E = torch.zeros(T, E.shape[1], E.shape[2]) all_F = torch.zeros_like(all_infections) #run the main loop for the linearized disease dynamics for t in range(1, T): #update nu with new information spread not_informed_fraction = 1 - informed not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta) #constant scaling the beta for information spread informed = 0.1*not_informed_fraction_diag@beta_information@informed + informed nu = nu_max*informed nu = vector_to_diag(1 - nu, beta) #infections new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I new_infections_active = alpha_fast @ new_infections new_infections_latent = new_infections - new_infections_active E = mu[t-1] @ E activations = alpha_slow*E E = (1 - alpha_slow)*E E[:, 0] += new_infections_latent.squeeze() E = G @ E @ G old_infections = nu @ d @ I I = new_infections_active + old_infections + newI[t] + activations.sum(dim=2).view_as(I) I = G @ I #return E, 
I, F by time and age group #mean across samples all_infections[t] = I.mean(dim=0) all_E[t] = E.mean(dim=0) all_F[t] = informed.mean(dim = 0) return all_infections, all_E, all_F def run_seis_information_new(T, G, S, I, migration_I, migration_E, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #read in for first period of F, informed #nu_sq = np.loadtxt('ann2018_clearanceProb.csv.csv', delimiter=',', skiprows=1) #nu_sq[np.isnan(nu_sq)] = 0 #nu_sq = nu_sq.mean(axis = 0) #nu_sq = torch.from_numpy(nu_sq) #duplicate these variables along an additional axis to match the batch size beta = beta.expand_as(G) informed = nu.view(len(nu), 1) informed = informed.expand(beta.shape[0], *informed.shape) nu = torch.diag(1 - nu).expand_as(beta) num_samples = G.shape[0] #keep track of infected, latent, and informed at each time step all_I = torch.zeros(T, num_samples, beta.shape[1], 1).double() all_E = torch.zeros(T, num_samples, E.shape[1], E.shape[2]).double() all_F = torch.zeros_like(all_I).double() all_I[0] = I[0] all_E[0] = E[0] #all_I[0] = I[30] #all_E[0] = E[30] all_F[0] = informed #run the main loop for the linearized disease dynamics for t in range(1, T): #update nu with new information spread not_informed_fraction = 1 - informed not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta) #constant scaling the beta for information spread informed = not_informed_fraction_diag@beta_information@informed + informed #print('here is info beta mat') #print(beta_information) #print('here is informed') #print(informed) #debug sze nu = nu_max*informed nu = vector_to_diag(1 - nu, beta) #infections new_infections = S[t-1] @ mu @ beta @ N[t-1] @ I new_infections_active = alpha_fast @ new_infections new_infections_latent = new_infections - new_infections_active E = mu @ E activations = alpha_slow@E E = E - activations E += 
new_infections_latent E = G @ E + migration_E[t] #CHANGING TO USING THE LAST MIGRATION PERIOD #E = G @ E + migration_E[30] old_infections = nu @ d @ I I = new_infections_active + old_infections + activations I = G @ I + migration_I[t] #CHANGING TO USING THE LAST MIGRATION PERIOD #I = G @ I + migration_I[30] #return E, I, F by time and age group #mean across samples all_I[t] = I all_E[t] = E all_F[t] = informed #print(all_I) return all_I, all_E, all_F class SISInstance(): """ Represents an instantiation of the SIS model with a particular (distribution over) parameters. Foward pass computes total infections as a function of nu, backward computes gradient wrt nu. """ def greedy(grad, U, L, K): ''' Greedily select budget number of elements with highest weight according to grad ''' sorted_groups = torch.sort(grad)[1] nu = L.clone() curr = 0 while (nu - L).sum() < K and curr < len(grad): amount_add = min([U[sorted_groups[curr]] - L[sorted_groups[curr]], K - (nu - L).sum()]) nu[[sorted_groups[curr]]] += amount_add curr += 1 return nu
37.5
169
0.618469
import torch import numpy as np def run_sis(T, G, S, I, newI, nu, mu, d, beta, N): ''' Runs the linearized SIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) #run the main loop for the linearized disease dynamics total_infected = I.mean(dim=0).sum() for t in range(1, T): new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] old_infections = nu @ d A = G @ (new_infections + old_infections) I = A @ I I = I + newI[t] total_infected += I.mean(dim=0).sum() return total_infected def run_seis(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) E = E.expand_as(beta) alpha_fast = torch.diag(alpha_fast).expand_as(beta) alpha_slow = alpha_slow.expand_as(beta) #run the main loop for the linearized disease dynamics total_infected = I.mean(dim=0).sum() import numpy as np infections_time = np.zeros((T, 100)) for t in range(1, T): new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I # print(new_infections.shape) # print(alpha_fast.shape) new_infections_active = alpha_fast @ new_infections new_infections_latent = new_infections - new_infections_active E = mu[t-1] @ E activations = alpha_slow*E E = (1 - alpha_slow)*E # print(E[:, 0].shape) # print(new_infections_latent.squeeze().shape) E[:, 0] += new_infections_latent.squeeze() E = G @ E @ G old_infections = nu @ d @ I # print(new_infections_active.shape) # print(old_infections.shape) # print(newI[t].shape) # print(activations.sum(dim=2).shape) # print(I.shape) I = new_infections_active + old_infections + newI[t] + 
activations.sum(dim=2).view_as(I) I = G @ I # infections_time.append(I.mean(dim=0).sum().item()) for j in range(100): total_pop = (1./torch.diag(N[t, j])).sum() infections_time[t, j] = I[j].sum()/total_pop if t == 1 and j == 0: print(total_pop.item(), I[j].sum().item()) # total_infected += I.mean(dim=0).sum() return infections_time def vector_to_diag(not_informed_fraction, beta): not_informed_fraction_diag = torch.zeros_like(beta) for s in range(beta.shape[0]): not_informed_fraction_diag[s] = torch.diag(not_informed_fraction[s].squeeze()) # print('diag output') # print(not_informed_fraction_diag) return not_informed_fraction_diag def run_seis_information(T, G, S, I, newI, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #duplicate these variables along an additional axis to match the batch size informed = nu.view(len(nu), 1) informed = informed.expand(beta.shape[0], *informed.shape) nu = torch.diag(1 - nu).expand_as(beta) d = torch.diag(1 - d).expand_as(beta) G = G.expand_as(beta) E = E.expand_as(beta) alpha_fast = torch.diag(alpha_fast).expand_as(beta) alpha_slow = alpha_slow.expand_as(beta) #keep track of infected, latent, and informed at each time step all_infections = torch.zeros(T, beta.shape[1], 1) all_E = torch.zeros(T, E.shape[1], E.shape[2]) all_F = torch.zeros_like(all_infections) #run the main loop for the linearized disease dynamics for t in range(1, T): #update nu with new information spread not_informed_fraction = 1 - informed not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta) #constant scaling the beta for information spread informed = 0.1*not_informed_fraction_diag@beta_information@informed + informed nu = nu_max*informed nu = vector_to_diag(1 - nu, beta) #infections new_infections = S[t-1] @ mu[t-1] @ beta @ N[t-1] @ I new_infections_active = alpha_fast @ new_infections 
new_infections_latent = new_infections - new_infections_active E = mu[t-1] @ E activations = alpha_slow*E E = (1 - alpha_slow)*E E[:, 0] += new_infections_latent.squeeze() E = G @ E @ G old_infections = nu @ d @ I I = new_infections_active + old_infections + newI[t] + activations.sum(dim=2).view_as(I) I = G @ I #return E, I, F by time and age group #mean across samples all_infections[t] = I.mean(dim=0) all_E[t] = E.mean(dim=0) all_F[t] = informed.mean(dim = 0) return all_infections, all_E, all_F def run_seis_information_new(T, G, S, I, migration_I, migration_E, nu, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max): ''' Runs the linearized SEIS model, returning the total number of infected agents summed over all time steps. ''' #read in for first period of F, informed #nu_sq = np.loadtxt('ann2018_clearanceProb.csv.csv', delimiter=',', skiprows=1) #nu_sq[np.isnan(nu_sq)] = 0 #nu_sq = nu_sq.mean(axis = 0) #nu_sq = torch.from_numpy(nu_sq) #duplicate these variables along an additional axis to match the batch size beta = beta.expand_as(G) informed = nu.view(len(nu), 1) informed = informed.expand(beta.shape[0], *informed.shape) nu = torch.diag(1 - nu).expand_as(beta) num_samples = G.shape[0] #keep track of infected, latent, and informed at each time step all_I = torch.zeros(T, num_samples, beta.shape[1], 1).double() all_E = torch.zeros(T, num_samples, E.shape[1], E.shape[2]).double() all_F = torch.zeros_like(all_I).double() all_I[0] = I[0] all_E[0] = E[0] #all_I[0] = I[30] #all_E[0] = E[30] all_F[0] = informed #run the main loop for the linearized disease dynamics for t in range(1, T): #update nu with new information spread not_informed_fraction = 1 - informed not_informed_fraction_diag = vector_to_diag(not_informed_fraction, beta) #constant scaling the beta for information spread informed = not_informed_fraction_diag@beta_information@informed + informed #print('here is info beta mat') #print(beta_information) #print('here is informed') 
#print(informed) #debug sze nu = nu_max*informed nu = vector_to_diag(1 - nu, beta) #infections new_infections = S[t-1] @ mu @ beta @ N[t-1] @ I new_infections_active = alpha_fast @ new_infections new_infections_latent = new_infections - new_infections_active E = mu @ E activations = alpha_slow@E E = E - activations E += new_infections_latent E = G @ E + migration_E[t] #CHANGING TO USING THE LAST MIGRATION PERIOD #E = G @ E + migration_E[30] old_infections = nu @ d @ I I = new_infections_active + old_infections + activations I = G @ I + migration_I[t] #CHANGING TO USING THE LAST MIGRATION PERIOD #I = G @ I + migration_I[30] #return E, I, F by time and age group #mean across samples all_I[t] = I all_E[t] = E all_F[t] = informed #print(all_I) return all_I, all_E, all_F class SISInstance(): """ Represents an instantiation of the SIS model with a particular (distribution over) parameters. Foward pass computes total infections as a function of nu, backward computes gradient wrt nu. """ def __init__(self, T, G, S, I, newI, mu, d, beta, N): self.T = T self.G = G self.S = S self.I = I self.newI = newI self.mu = mu self.d = d self.beta = beta self.N = N def __call__(self, nu): return run_sis(self.T, self.G, self.S, self.I, self.newI, nu, self.mu, self.d, self.beta, self.N) def greedy(grad, U, L, K): ''' Greedily select budget number of elements with highest weight according to grad ''' sorted_groups = torch.sort(grad)[1] nu = L.clone() curr = 0 while (nu - L).sum() < K and curr < len(grad): amount_add = min([U[sorted_groups[curr]] - L[sorted_groups[curr]], K - (nu - L).sum()]) nu[[sorted_groups[curr]]] += amount_add curr += 1 return nu def sfw_torch(L, U, K, T, G, S, I, newI, mu, d, beta, N, num_iters = 100): sis = SISInstance(T, G, S, I, newI, mu, d, beta, N) nu = torch.rand_like(L, requires_grad=True) nu.data.zero_() nu.grad = torch.zeros_like(nu) for i in range(num_iters): val = sis(nu + L) nu.grad.zero_() val.backward() nu.data += 1./num_iters * greedy(nu.grad, U - L, 
torch.zeros_like(nu), K) nu.data += L return nu def sfw_seis_torch(L, U, K, T, G, S, I, migration_I, migration_E, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max, num_iters = 100): nu = torch.rand_like(L, requires_grad=True) nu.data.zero_() nu.grad = torch.zeros_like(nu) for i in range(num_iters): print('optimizing: {}/{}'.format(i, num_iters)) all_I, all_E, all_F = run_seis_information_new(T, G, S, I, migration_I, migration_E, nu + L, mu, d, beta, N, alpha_fast, alpha_slow, E, beta_information, nu_max) val = all_I.sum() nu.grad.zero_() val.backward() nu.data += 1./num_iters * greedy(nu.grad, U - L, torch.zeros_like(nu), K) nu.data += L #print(nu) return nu
1,754
0
126
886d846b1bd5e2c59d450a892dc2cfd09102c8c5
3,042
py
Python
cj_2nd/problem_2/problem2.py
yskang/AlgorithmPracticeWithPython
f7129bd1924a7961489198f0ee052d2cd1e9cf40
[ "MIT" ]
null
null
null
cj_2nd/problem_2/problem2.py
yskang/AlgorithmPracticeWithPython
f7129bd1924a7961489198f0ee052d2cd1e9cf40
[ "MIT" ]
1
2019-11-04T06:44:04.000Z
2019-11-04T06:46:55.000Z
cj_2nd/problem_2/problem2.py
yskang/AlgorithmPractice
31b76e38b4c2f1e3e29fb029587662a745437912
[ "MIT" ]
null
null
null
import sys filename = sys.argv[1] test_input = open(filename, 'r') numberOfTest = int(test_input.readline().replace('\n', '').replace('\r', '').strip()) nodesR = [] distanceR = [] nodesC = [] distanceC = [] branchPoint = 0 output = open(filename.replace('in', 'out'), 'w') for i in range(numberOfTest): initData() numNodeRow = int(test_input.readline().replace('\n', '').replace('\r', '').strip()) line = test_input.readline().replace('\n', '').replace('\r', '').strip() ins = line.split(' ') for j in range(0, 2 * (numNodeRow - 1), 2): nodesR.append(int(ins[j])) distanceR.append(int(ins[j+1])) ins = test_input.readline().replace('\n', '').replace('\r', '').strip().split(' ') branchPoint = int(ins[0]) numNodeCol = int(ins[1]) line = test_input.readline().replace('\n', '').replace('\r', '').strip() ins = line.split(' ') for j in range(0, 2 * numNodeCol, 2): distanceC.append(int(ins[j])) nodesC.append(int(ins[j+1])) if branchPoint-1 >= len(nodesR): nodesC.insert(0, 100) else: nodesC.insert(0, nodesR[branchPoint-1]) checkCost() test_input.close() output.close()
28.698113
87
0.606838
import sys filename = sys.argv[1] test_input = open(filename, 'r') numberOfTest = int(test_input.readline().replace('\n', '').replace('\r', '').strip()) nodesR = [] distanceR = [] nodesC = [] distanceC = [] branchPoint = 0 def initData(): nodesR[:] = [] distanceR[:] = [] nodesC[:] = [] distanceC[:] = [] branchPoint = 0 def getCostA(): cost = 0 currentCost = nodesR[0] for i in range(branchPoint - 1): if currentCost > nodesR[i]: currentCost = nodesR[i] cost = cost + currentCost * distanceR[i] return (cost, currentCost) def getCostB(initCost): cost = 0 currentCost = initCost for i in range(branchPoint - 1, len(nodesR)): if currentCost > nodesR[i]: currentCost = nodesR[i] cost = cost + currentCost * distanceR[i] return cost def getForwardCost(initCost, returnPoint): cost = 0 currentCost = initCost for i in range(returnPoint): if currentCost > nodesC[i]: currentCost = nodesC[i] cost = cost + currentCost * distanceC[i] return (cost, currentCost) def getBackwardCost(initCost, returnPoint): cost = 0 currentCost = initCost for i in range(returnPoint, 0, -1): if currentCost > nodesC[i]: currentCost = nodesC[i] cost = cost + currentCost * distanceC[i - 1] return (cost, currentCost) def getBranchCost(initCost, returnPoint): (forwardCost, lastCost) = getForwardCost(initCost, returnPoint) (backwardCost, lastCost) = getBackwardCost(lastCost, returnPoint) return (forwardCost + backwardCost, lastCost) def checkCost(): (costA, lastCost) = getCostA() costB = getCostB(lastCost) totalCosts = [] totalCosts.append(costA + costB) for i in range(len(nodesC)): if lastCost > nodesC[i]: (branchCost, branchLastCost) = getBranchCost(lastCost, i) costB = getCostB(branchLastCost) totalCosts.append(costA + branchCost + costB) # print(min(totalCosts)) output.write(str(min(totalCosts)) + '\n') output = open(filename.replace('in', 'out'), 'w') for i in range(numberOfTest): initData() numNodeRow = int(test_input.readline().replace('\n', '').replace('\r', '').strip()) line = 
test_input.readline().replace('\n', '').replace('\r', '').strip() ins = line.split(' ') for j in range(0, 2 * (numNodeRow - 1), 2): nodesR.append(int(ins[j])) distanceR.append(int(ins[j+1])) ins = test_input.readline().replace('\n', '').replace('\r', '').strip().split(' ') branchPoint = int(ins[0]) numNodeCol = int(ins[1]) line = test_input.readline().replace('\n', '').replace('\r', '').strip() ins = line.split(' ') for j in range(0, 2 * numNodeCol, 2): distanceC.append(int(ins[j])) nodesC.append(int(ins[j+1])) if branchPoint-1 >= len(nodesR): nodesC.insert(0, 100) else: nodesC.insert(0, nodesR[branchPoint-1]) checkCost() test_input.close() output.close()
1,696
0
161
ece9c2de7d3783b49ddc26cdb15385cc57ba75f8
1,094
py
Python
LinearRegression/pcr.py
jayshonzs/ESL
2b51b1e01cb204d17588b7a2a8f765daa0a1e0c5
[ "MIT" ]
9
2016-09-26T15:56:16.000Z
2019-10-21T05:59:14.000Z
LinearRegression/pcr.py
jayshonzs/ESL
2b51b1e01cb204d17588b7a2a8f765daa0a1e0c5
[ "MIT" ]
null
null
null
LinearRegression/pcr.py
jayshonzs/ESL
2b51b1e01cb204d17588b7a2a8f765daa0a1e0c5
[ "MIT" ]
3
2016-08-15T00:18:22.000Z
2019-02-28T14:54:55.000Z
''' Created on 2014-5-25 @author: xiajie ''' import numpy as np import prostate import preprocess if __name__ == '__main__': inputs, output, Ttype = prostate.loaddata() train_data, train_out, test_data, test_out = prostate.cookdata2(inputs, output, Ttype) X, X_mean, Y_mean, X_std = preprocess.center_data(train_data, train_out, True) beta = PCR(X,train_out) print beta RSS = 0 for i in range(len(test_out)): x = test_data[i] x = (x-X_mean)/X_std print test_out[i], predict(x,beta,Y_mean) RSS += (test_out[i]-predict(x,beta,Y_mean))**2 print RSS/len(test_out)
26.682927
90
0.617002
''' Created on 2014-5-25 @author: xiajie ''' import numpy as np import prostate import preprocess def theta(z, y): return np.transpose(z).dot(y)/np.transpose(z).dot(z) def PCR(X,Y,m=7): z = [] thetas = [] beta = np.zeros(len(X[0])) U, S, V = np.linalg.svd(X, full_matrices=True) for i in range(m): z.append(X.dot(V[i])) for i in range(m): thetas.append(theta(z[i],Y)) for i in range(m): beta = beta + V[i]*thetas[i] return beta def predict(x, beta, Y_mean): return Y_mean + np.transpose(x).dot(beta) if __name__ == '__main__': inputs, output, Ttype = prostate.loaddata() train_data, train_out, test_data, test_out = prostate.cookdata2(inputs, output, Ttype) X, X_mean, Y_mean, X_std = preprocess.center_data(train_data, train_out, True) beta = PCR(X,train_out) print beta RSS = 0 for i in range(len(test_out)): x = test_data[i] x = (x-X_mean)/X_std print test_out[i], predict(x,beta,Y_mean) RSS += (test_out[i]-predict(x,beta,Y_mean))**2 print RSS/len(test_out)
400
0
69
46fd6c109844df9a60a301cc1c15e3eb17f850a2
11,188
py
Python
tests/unit/profiles/test_views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
tests/unit/profiles/test_views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
tests/unit/profiles/test_views.py
etienne86/oc_p13_team_spirit
fd3d45618d349ecd0a03e63c4a7e9c1044eeffaa
[ "MIT" ]
null
null
null
"""Contain the unit tests related to the views in app ``profiles``.""" from django.http.request import HttpRequest from django.test import TestCase from teamspirit.core.models import Address from teamspirit.profiles.models import Personal from teamspirit.profiles.views import ( add_id_file_view, add_medical_file_view, custom_password_reset_complete_view, custom_password_reset_done_view, drop_file_view, drop_id_file_view, drop_medical_file_view, password_changed_view, personal_info_view, phone_address_view, profile_view, ) from teamspirit.users.models import User class ProfilesViewsTestCase(TestCase): """Test the views in the app ``profiles``.""" def test_profile_view(self): """Unit test - app ``profiles`` - view ``profile_view`` Test the profile view. """ view = profile_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Profil</title>', html) # next test: status 403 (Forbidden), problem with CSRF? # def test_custom_password_change_view(self): # """Unit test - app ``profiles`` - view ... # [complete view: ``custom_password_change_view``] # Test the custom password change view. # """ # view = custom_password_change_view # view = requires_csrf_token(view) # response = view(self.get_request) # print(response) # print(type(response)) # # response = csrf_exempt(view)(self.get_request) # # render the response content # # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - Changement de mot de passe</title>', # html # ) def test_password_changed_view(self): """Unit test - app ``profiles`` - view ``password_changed_view`` Test the 'password changed' (confirmation) view. 
""" view = password_changed_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Mot de passe changé</title>', html) # next test: status 403 (Forbidden), problem with CSRF? # def test_password_reset_view(self): # """Unit test - app ``profiles`` - view ``custom_password_reset_view`` # Test the custom password reset view. # """ # view = custom_password_reset_view # response = view(self.get_request) # # response = csrf_exempt(view)(self.get_request) # # render the response content # # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - Réinitialisation du mot de passe</title>', # html # ) def test_password_reset_done_view(self): """Unit test - app ``profiles`` - view ... [complete view: ``custom_password_reset_done_view``] Test the custom password reset (done) view. """ view = custom_password_reset_done_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - ' 'Envoi d\'un mail pour réinitialisation du mot de passe</title>', html ) # next test: AttributeError: 'NoneType' object has no attribute 'is_bound' # I do not know how to generate/mock the `uidb64` and `token`. # def test_password_reset_confirm_view(self): # """Unit test - app ``profiles`` - view ... # [complete view: ``custom_password_reset_confirm_view``] # Test the custom password reset confirm view. 
# """ # view = custom_password_reset_confirm_view # response = view(self.get_request, uidb64='uidb64', token='token') # print(type(response)) # print(response) # # response = csrf_exempt(view)(self.get_request) # # render the response content # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - ' # 'Définition du nouveau mot de passe</title>', # html # ) def test_password_reset_complete_view(self): """Unit test - app ``profiles`` - view ... [complete view: ``custom_password_reset_complete_view``] Test the custom password reset (complete) view. """ view = custom_password_reset_complete_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Mot de passe réinitialisé', html) def test_personal_info_view(self): """Unit test - app ``profiles`` - view ``personal_info_view`` Test the personal info view. """ view = personal_info_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour des informations personnelles', html ) def test_update_phone_address_view(self): """Unit test - app ``profiles`` - view ``phone_address_view`` Test the phone and address view. 
""" view = phone_address_view response = view(self.get_request) # type is TemplateResponse html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour des coordonnées', html ) def test_add_medical_file_view(self): """Unit test - app ``profiles`` - view ``add_medical_file_view`` Test the 'medical file add' view. """ view = add_medical_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour du certificat médical ou de la' ' licence', html ) def test_add_id_file_view(self): """Unit test - app ``profiles`` - view ``add_id_file_view`` Test the 'id file add' view. """ view = add_id_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour de la pièce d\'identité', html ) def test_drop_medical_file_view(self): """Unit test - app ``profiles`` - view ``drop_medical_file_view`` Test the 'medical file drop' view. """ view = drop_medical_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression du certificat médical ou de la' ' licence', html ) def test_drop_id_file_view(self): """Unit test - app ``profiles`` - view ``drop_id_file_view`` Test the 'id file drop' view. 
""" view = drop_id_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression de la pièce d\'identité', html ) def test_drop_file_view(self): """Unit test - app ``profiles`` - view ``drop_file_view`` Test the 'file drop' view. """ view = drop_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression d\'un fichier', html )
36.442997
79
0.611459
"""Contain the unit tests related to the views in app ``profiles``.""" from django.http.request import HttpRequest from django.test import TestCase from teamspirit.core.models import Address from teamspirit.profiles.models import Personal from teamspirit.profiles.views import ( add_id_file_view, add_medical_file_view, custom_password_reset_complete_view, custom_password_reset_done_view, drop_file_view, drop_id_file_view, drop_medical_file_view, password_changed_view, personal_info_view, phone_address_view, profile_view, ) from teamspirit.users.models import User class ProfilesViewsTestCase(TestCase): """Test the views in the app ``profiles``.""" def setUp(self): super().setUp() # a user in database self.address = Address.objects.create( label_first="1 rue de l'impasse", label_second="", postal_code="75000", city="Paris", country="France" ) self.personal = Personal.objects.create( phone_number="01 02 03 04 05", address=self.address ) self.user = User.objects.create_user( email="toto@mail.com", first_name="Toto", password="TopSecret", personal=self.personal ) # log this user in self.client.login(email="toto@mail.com", password="TopSecret") # a 'get' request self.get_request = HttpRequest() self.get_request.method = 'get' self.get_request.user = self.user # a 'post' request self.post_request = HttpRequest() self.post_request.method = 'post' self.post_request.user = self.user def test_profile_view(self): """Unit test - app ``profiles`` - view ``profile_view`` Test the profile view. """ view = profile_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Profil</title>', html) # next test: status 403 (Forbidden), problem with CSRF? # def test_custom_password_change_view(self): # """Unit test - app ``profiles`` - view ... 
# [complete view: ``custom_password_change_view``] # Test the custom password change view. # """ # view = custom_password_change_view # view = requires_csrf_token(view) # response = view(self.get_request) # print(response) # print(type(response)) # # response = csrf_exempt(view)(self.get_request) # # render the response content # # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - Changement de mot de passe</title>', # html # ) def test_password_changed_view(self): """Unit test - app ``profiles`` - view ``password_changed_view`` Test the 'password changed' (confirmation) view. """ view = password_changed_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Mot de passe changé</title>', html) # next test: status 403 (Forbidden), problem with CSRF? # def test_password_reset_view(self): # """Unit test - app ``profiles`` - view ``custom_password_reset_view`` # Test the custom password reset view. # """ # view = custom_password_reset_view # response = view(self.get_request) # # response = csrf_exempt(view)(self.get_request) # # render the response content # # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - Réinitialisation du mot de passe</title>', # html # ) def test_password_reset_done_view(self): """Unit test - app ``profiles`` - view ... [complete view: ``custom_password_reset_done_view``] Test the custom password reset (done) view. 
""" view = custom_password_reset_done_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - ' 'Envoi d\'un mail pour réinitialisation du mot de passe</title>', html ) # next test: AttributeError: 'NoneType' object has no attribute 'is_bound' # I do not know how to generate/mock the `uidb64` and `token`. # def test_password_reset_confirm_view(self): # """Unit test - app ``profiles`` - view ... # [complete view: ``custom_password_reset_confirm_view``] # Test the custom password reset confirm view. # """ # view = custom_password_reset_confirm_view # response = view(self.get_request, uidb64='uidb64', token='token') # print(type(response)) # print(response) # # response = csrf_exempt(view)(self.get_request) # # render the response content # response.render() # html = response.content.decode('utf8') # self.assertEqual(response.status_code, 200) # self.assertTrue(html.startswith('<!DOCTYPE html>')) # self.assertIn( # '<title>Team Spirit - ' # 'Définition du nouveau mot de passe</title>', # html # ) def test_password_reset_complete_view(self): """Unit test - app ``profiles`` - view ... [complete view: ``custom_password_reset_complete_view``] Test the custom password reset (complete) view. """ view = custom_password_reset_complete_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn('<title>Team Spirit - Mot de passe réinitialisé', html) def test_personal_info_view(self): """Unit test - app ``profiles`` - view ``personal_info_view`` Test the personal info view. 
""" view = personal_info_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour des informations personnelles', html ) def test_update_phone_address_view(self): """Unit test - app ``profiles`` - view ``phone_address_view`` Test the phone and address view. """ view = phone_address_view response = view(self.get_request) # type is TemplateResponse html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour des coordonnées', html ) def test_add_medical_file_view(self): """Unit test - app ``profiles`` - view ``add_medical_file_view`` Test the 'medical file add' view. """ view = add_medical_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour du certificat médical ou de la' ' licence', html ) def test_add_id_file_view(self): """Unit test - app ``profiles`` - view ``add_id_file_view`` Test the 'id file add' view. """ view = add_id_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Mise à jour de la pièce d\'identité', html ) def test_drop_medical_file_view(self): """Unit test - app ``profiles`` - view ``drop_medical_file_view`` Test the 'medical file drop' view. 
""" view = drop_medical_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression du certificat médical ou de la' ' licence', html ) def test_drop_id_file_view(self): """Unit test - app ``profiles`` - view ``drop_id_file_view`` Test the 'id file drop' view. """ view = drop_id_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression de la pièce d\'identité', html ) def test_drop_file_view(self): """Unit test - app ``profiles`` - view ``drop_file_view`` Test the 'file drop' view. """ view = drop_file_view response = view(self.get_request) # type is TemplateResponse # render the response content response.render() html = response.content.decode('utf8') self.assertEqual(response.status_code, 200) self.assertTrue(html.startswith('<!DOCTYPE html>')) self.assertIn( '<title>Team Spirit - Suppression d\'un fichier', html )
995
0
27
86a8ad2afb7633e6e47ada84d04ae169c3278ecc
4,831
py
Python
modules/benchmark_common_functions.py
liushiyu1994/random_binary_file
17c317b777387346114a4ad20c1bc3cb8a4147ce
[ "MIT" ]
null
null
null
modules/benchmark_common_functions.py
liushiyu1994/random_binary_file
17c317b777387346114a4ad20c1bc3cb8a4147ce
[ "MIT" ]
null
null
null
modules/benchmark_common_functions.py
liushiyu1994/random_binary_file
17c317b777387346114a4ad20c1bc3cb8a4147ce
[ "MIT" ]
null
null
null
from .packages import os, plt from .common_functions import parameter_check_function, random_binary_files_single, time_stamp
49.295918
114
0.742703
from .packages import os, plt from .common_functions import parameter_check_function, random_binary_files_single, time_stamp def benchmark_text_output(result_sorted_list, output_file_name, output_folder_name): headline = "Generator name,\t\t Running time,\t\t File count rate (count/s),\t\t File size rate (MiB/s)" text_output_line_list = [headline] for generator_name, current_time, file_num_rate, file_size_rate in result_sorted_list: newline = "{},\t\t {},\t\t {},\t\t {}".format(generator_name, current_time, file_num_rate, file_size_rate) text_output_line_list.append(newline) output_file_path = "{}/{}".format(output_folder_name, output_file_name) with open(output_file_path, 'w') as f_out: f_out.write("\n".join(text_output_line_list)) def benchmark_plotting(result_sorted_list, output_figure_name, output_folder_name): x_loc_left_offset = 0.5 width = 0.5 x_loc_list = [] x_label_list = [] data_list = [] for index, (generator_name, _, _, file_size_rate) in enumerate(result_sorted_list): x_loc_list.append(x_loc_left_offset + index) x_label_list.append(generator_name) data_list.append(file_size_rate) fig, ax = plt.subplots(figsize=(12, 8)) ax.bar(x_loc_list, data_list, width) ax.set_xlim([0, len(result_sorted_list) + x_loc_left_offset]) ax.set_ylabel('File size rate (MiB/s)') ax.set_xticks(x_loc_list) ax.set_xticklabels(x_label_list, rotation=15) save_path = "{}/{}".format(output_folder_name, output_figure_name) fig.savefig(save_path, dpi=fig.dpi) def benchmark_parameter_check_function(config): error_string = "config.py must include following attributes for benchmark: {}" try: generator_name_set = config.generator_name_list except AttributeError: raise ValueError(error_string.format("generator_name_list")) try: benchmark_output_folder = config.benchmark_output_folder except AttributeError: raise ValueError(error_string.format("benchmark_output_folder")) if os is not None and not os.path.isdir(benchmark_output_folder): raise ValueError("Cannot find benchmark output folder: 
{}".format(benchmark_output_folder)) try: with open("{}/test.txt".format(benchmark_output_folder), 'w') as f_out: f_out.write("This is just a test for the benchmark output folder") except Exception: raise ValueError("Cannot write file to folder: {}".format(benchmark_output_folder)) return generator_name_set, benchmark_output_folder def make_son_folder(parent_folder_path, target_son_folder_name): target_son_folder_path = "{}/{}".format(parent_folder_path, target_son_folder_name) if os is not None: if not os.path.isdir(target_son_folder_path): os.mkdir(target_son_folder_path) return target_son_folder_path else: return parent_folder_path def benchmark_entry_function(config): ( _, current_file_num, current_file_size, current_file_name_length, current_output_folder, seed_length, parallel, parallel_num, parallel_file_name_prefix_length) = parameter_check_function(config) def current_print_func(file_count): print("[{}] {}({:.2f}%) finished".format(time_stamp(), file_count, file_count / current_file_num * 100)) generator_name_list, benchmark_output_folder = benchmark_parameter_check_function(config) benchmark_file_output_folder = make_son_folder(benchmark_output_folder, "output_files") total_file_size = current_file_size * current_file_num final_time_list = [] print("[{}] Start benchmark".format(time_stamp())) for generator_name in generator_name_list: print("[{}] Start generator: {}".format(time_stamp(), generator_name)) current_generator_output_folder = make_son_folder(benchmark_file_output_folder, generator_name) current_time = random_binary_files_single( generator_name, current_file_num, current_file_size, current_file_name_length, current_generator_output_folder, seed_length=seed_length, print_func=current_print_func) file_num_rate = current_file_num / current_time file_size_rate = total_file_size / current_time / 10**6 final_time_list.append((generator_name, current_time, file_num_rate, file_size_rate)) print("[{}] Finish generator: {}".format(time_stamp(), 
generator_name)) sorted_final_time_list = sorted(final_time_list, key=lambda x: x[3]) text_output_file_name = "benchmark_result.csv" benchmark_text_output(sorted_final_time_list, text_output_file_name, benchmark_output_folder) if plt is not None: figure_name = "benchmark_result.png" benchmark_plotting(sorted_final_time_list, figure_name, benchmark_output_folder) plt.show()
4,586
0
115
6ad8ac63ac2ca2e4e626cb985983826a1b89907e
10,317
py
Python
bin/vrc_ssh.py
osrf/cloudsim-legacy
01ea7dd2708ed9797a860ac839028ec62fd96a23
[ "Apache-2.0" ]
null
null
null
bin/vrc_ssh.py
osrf/cloudsim-legacy
01ea7dd2708ed9797a860ac839028ec62fd96a23
[ "Apache-2.0" ]
null
null
null
bin/vrc_ssh.py
osrf/cloudsim-legacy
01ea7dd2708ed9797a860ac839028ec62fd96a23
[ "Apache-2.0" ]
1
2021-03-16T15:00:51.000Z
2021-03-16T15:00:51.000Z
#!/usr/bin/env python """ Program that runs a script or copy a file into a set of machines. """ import argparse import os import sys from threading import Thread import abc import subprocess import getpass import Queue NORMAL = '\033[00m' RED = '\033[0;31m' CLOUDSIM_PREFIX = 'OSRF_CloudSim_' CONST_PREFIX = 'OSRF_VRC_Constellation_' MACHINES = ['cs', 'router', 'sim', 'fc1', 'fc2'] class Ssh_base(object): ''' Abstract class for running a ssh/scp command. ''' DEFAULT_SSH_OPTS = ('-o UserKnownHostsFile=/dev/null ' '-o StrictHostKeyChecking=no ' '-o ConnectTimeout=5') __metaclass__ = abc.ABCMeta def get_credentials(self): ''' To be implemented by derived classes. ''' return def run_remote_command(self, cmd, queue): ''' Execute a command, and put in a shared queue the stdout, stderr outputs. @param cmd Command to be executed @param queue Shared queue (thread safe) ''' po = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = po.communicate() if po.returncode == 0: msg = self.name + ': Success' if out != '': msg += '\n\t' + out.replace('\n', '\n\t') queue.put(msg) else: queue.put(self.name + ': Error\n\t%s' % err) def get_cmd_context(self, ssh_options): ''' Returns the current ssh options, key file and ip of the current machine. @param ssh_options Extra ssh options to be used ''' ssh_options = Ssh_base.DEFAULT_SSH_OPTS + ssh_options key, ip = self.get_credentials() if not os.path.exists(key): raise NameError('Identity file not found (%s)...Aborting' % key) return ssh_options, key, ip def copy(self, ssh_options, src, user, dst, queue): ''' Copy a file into a remote machine. 
@param ssh_options Extra ssh options @param src Full path to the file to be uploaded @param user Remote user used by ssh @param dst Destination path @param queue Shared queue to store the output of the command ''' ssh_options, key, ip = self.get_cmd_context(ssh_options) cmd = ('scp ' + ssh_options + ' -i ' + key + ' ' + src + ' ' + user + '@' + ip + ':' + dst) self.run_remote_command(cmd, queue) def run(self, ssh_options, cmd, user, queue): ''' Run a command in a remote machine. @param ssh_options Extra ssh options @param cmd Command to be executed @param user Remote user used by ssh @param queue Shared queue to store the output of the command ''' ssh_options, key, ip = self.get_cmd_context(ssh_options) cmd = ('ssh ' + ssh_options + ' -i ' + key + ' ' + user + '@' + ip + ' ' + cmd) self.run_remote_command(cmd, queue) class Ssh_cloudsim(Ssh_base): ''' Derived class for running a ssh/scp command in a CloudSim machine. ''' def get_credentials(self): ''' Return the CloudSim credentials. ''' directory = self.constellation['constellation_directory'] ip = self.constellation['simulation_ip'] key = os.path.join(directory, 'cs', 'key-cs.pem') return (key, ip) class Ssh_router(Ssh_base): ''' Derived class for running a ssh/scp command in a router machine. ''' def get_credentials(self): ''' Return the Router credentials. ''' directory = self.constellation['constellation_directory'] ip = self.constellation['router_public_ip'] key = os.path.join(directory, 'router', 'key-router.pem') return (key, ip) class Ssh_machine(Ssh_base): ''' Derived class for running a ssh/scp command in a sim, fc1, or fc2 machine. ''' def get_credentials(self): ''' Return the Sim, FC1, or FC2 credentials. 
''' if self.machine_type == 'sim': ip = '10.0.0.51' key_name = 'key-sim.pem' elif self.machine_type == 'fc1': ip = '10.0.0.52' key_name = 'key-fc1.pem' elif self.machine_type == 'fc2': ip = '10.0.0.53' key_name = 'key-fc2.pem' else: raise NameError('Invalid machine type: %s' % self.machine_type) key = os.path.join('/home/ubuntu/cloudsim', key_name) return (key, ip) def go(args): ''' Function that run a command or copy a file in multiples machines. A thread is created to the job in every machine. A shared queue among the threads is used to capture the returned value of the commands, stdout, and stderr. @param args Command line arguments ''' # Retrieve command line arguments ssh_options = args.ssh_options machine_type = args.type subcommand = args.which if subcommand == 'run': arg = args.cmd stats_msg = 'Command executed in' elif subcommand == 'copy': arg = args.src dst = args.dst stats_msg = 'File uploaded into' if not os.path.exists(arg): print '%sFile not found: (%s)...Aborting%s' % (RED, arg, NORMAL) sys.exit(1) else: print 'Invalid subcommand (%s)...Aborting' % subcommand sys.exit(1) # Sanity check if getpass.getuser() != 'root': print "Invalid user, you should run this program as root...Aborting" sys.exit(1) # Counter for stats, threads for the job, and a queue for the returned vals11 counter = 0 threads = [] succeed_queue = Queue.Queue() if machine_type in ['cs', 'router']: try: # Import cloudsimd basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) if not os.path.exists(os.path.join(basepath, 'cloudsimd')): print 'Your DB does not contain any remote CloudSim...Aborting' sys.exit(1) sys.path.insert(0, basepath) from cloudsimd import cloudsimd const_list = cloudsimd.list_constellations() except Exception, excep: print ('%sError importing cloudsimd: %s%s' % (RED, repr(excep), NORMAL)) sys.exit(1) # Iterate over the list of CloudSims for constellation in const_list: try: name = constellation['constellation_name'] if 
name.startswith(CLOUDSIM_PREFIX) and machine_type == 'cs': machine = Ssh_cloudsim(name, constellation) elif name.startswith(CONST_PREFIX) and machine_type == 'router': machine = Ssh_router(name, constellation) else: continue if subcommand == 'run': params = [ssh_options, arg, 'ubuntu', succeed_queue] t = Thread(target=machine.run, args=params) elif subcommand == 'copy': params = [ssh_options, arg, 'ubuntu', dst, succeed_queue] t = Thread(target=machine.copy, args=params) threads.append(t) t.start() counter += 1 except Exception, excep: print ('%sError running command: %s%s' % (RED, repr(excep), NORMAL)) counter -= 1 elif machine_type in ['sim', 'fc1', 'fc2']: try: machine = Ssh_machine(machine_type) if subcommand == 'run': machine.run(ssh_options, arg, 'ubuntu', succeed_queue) elif subcommand == 'copy': machine.copy(ssh_options, arg, 'ubuntu', dst, succeed_queue) counter += 1 except Exception, excep: print repr(excep) sys.exit(1) else: print 'Invalid machine type (%s)...Aborting' % machine_type sys.exit(1) # Wait for all the threads to finish [x.join() for x in threads] # Print some stats for elem in sorted(list(succeed_queue.queue)): print elem print '%s %d machine/s' % (stats_msg, counter) if __name__ == '__main__': # Specify top level command line arguments parser = argparse.ArgumentParser(description='Manage multiple VRC SSH/SCP') parser.add_argument('-o', '--ssh_options', default='', help='ssh options') subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help') # create the parser for the "run" command parser_run = subparsers.add_parser('run', help='Run a command in several machines') parser_run.add_argument('cmd', help='Command to run remotely') parser_run.set_defaults(which='run') parser_run.add_argument('type', choices=MACHINES, help='Remote machine type') parser_run.set_defaults(func=go) # create the parser for the "copy" command parser_copy = subparsers.add_parser('copy', help='Copy a file into several 
machines') parser_copy.add_argument('src', help='File to upload') parser_copy.set_defaults(which='copy') parser_copy.add_argument('dst', help='Destination path') parser_copy.add_argument('type', choices=MACHINES, help='Remote machine type') parser_copy.set_defaults(func=go) # Parse command line arguments args = parser.parse_args() args.func(args)
34.275748
85
0.583794
#!/usr/bin/env python """ Program that runs a script or copy a file into a set of machines. """ import argparse import os import sys from threading import Thread import abc import subprocess import getpass import Queue NORMAL = '\033[00m' RED = '\033[0;31m' CLOUDSIM_PREFIX = 'OSRF_CloudSim_' CONST_PREFIX = 'OSRF_VRC_Constellation_' MACHINES = ['cs', 'router', 'sim', 'fc1', 'fc2'] class Ssh_base(object): ''' Abstract class for running a ssh/scp command. ''' DEFAULT_SSH_OPTS = ('-o UserKnownHostsFile=/dev/null ' '-o StrictHostKeyChecking=no ' '-o ConnectTimeout=5') __metaclass__ = abc.ABCMeta def __init__(self, name, constellation=None): self.constellation = constellation self.name = name def get_credentials(self): ''' To be implemented by derived classes. ''' return def run_remote_command(self, cmd, queue): ''' Execute a command, and put in a shared queue the stdout, stderr outputs. @param cmd Command to be executed @param queue Shared queue (thread safe) ''' po = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = po.communicate() if po.returncode == 0: msg = self.name + ': Success' if out != '': msg += '\n\t' + out.replace('\n', '\n\t') queue.put(msg) else: queue.put(self.name + ': Error\n\t%s' % err) def get_cmd_context(self, ssh_options): ''' Returns the current ssh options, key file and ip of the current machine. @param ssh_options Extra ssh options to be used ''' ssh_options = Ssh_base.DEFAULT_SSH_OPTS + ssh_options key, ip = self.get_credentials() if not os.path.exists(key): raise NameError('Identity file not found (%s)...Aborting' % key) return ssh_options, key, ip def copy(self, ssh_options, src, user, dst, queue): ''' Copy a file into a remote machine. 
@param ssh_options Extra ssh options @param src Full path to the file to be uploaded @param user Remote user used by ssh @param dst Destination path @param queue Shared queue to store the output of the command ''' ssh_options, key, ip = self.get_cmd_context(ssh_options) cmd = ('scp ' + ssh_options + ' -i ' + key + ' ' + src + ' ' + user + '@' + ip + ':' + dst) self.run_remote_command(cmd, queue) def run(self, ssh_options, cmd, user, queue): ''' Run a command in a remote machine. @param ssh_options Extra ssh options @param cmd Command to be executed @param user Remote user used by ssh @param queue Shared queue to store the output of the command ''' ssh_options, key, ip = self.get_cmd_context(ssh_options) cmd = ('ssh ' + ssh_options + ' -i ' + key + ' ' + user + '@' + ip + ' ' + cmd) self.run_remote_command(cmd, queue) class Ssh_cloudsim(Ssh_base): ''' Derived class for running a ssh/scp command in a CloudSim machine. ''' def __init__(self, name, constellation): super(Ssh_cloudsim, self).__init__(name, constellation) def get_credentials(self): ''' Return the CloudSim credentials. ''' directory = self.constellation['constellation_directory'] ip = self.constellation['simulation_ip'] key = os.path.join(directory, 'cs', 'key-cs.pem') return (key, ip) class Ssh_router(Ssh_base): ''' Derived class for running a ssh/scp command in a router machine. ''' def __init__(self, name, constellation): super(Ssh_router, self).__init__(name, constellation) def get_credentials(self): ''' Return the Router credentials. ''' directory = self.constellation['constellation_directory'] ip = self.constellation['router_public_ip'] key = os.path.join(directory, 'router', 'key-router.pem') return (key, ip) class Ssh_machine(Ssh_base): ''' Derived class for running a ssh/scp command in a sim, fc1, or fc2 machine. 
''' def __init__(self, machine_type): super(Ssh_machine, self).__init__(machine_type) if machine_type not in ['sim', 'fc1', 'fc2']: raise NameError('Invalid machine type: %s' % machine_type) else: self.machine_type = machine_type def get_credentials(self): ''' Return the Sim, FC1, or FC2 credentials. ''' if self.machine_type == 'sim': ip = '10.0.0.51' key_name = 'key-sim.pem' elif self.machine_type == 'fc1': ip = '10.0.0.52' key_name = 'key-fc1.pem' elif self.machine_type == 'fc2': ip = '10.0.0.53' key_name = 'key-fc2.pem' else: raise NameError('Invalid machine type: %s' % self.machine_type) key = os.path.join('/home/ubuntu/cloudsim', key_name) return (key, ip) def go(args): ''' Function that run a command or copy a file in multiples machines. A thread is created to the job in every machine. A shared queue among the threads is used to capture the returned value of the commands, stdout, and stderr. @param args Command line arguments ''' # Retrieve command line arguments ssh_options = args.ssh_options machine_type = args.type subcommand = args.which if subcommand == 'run': arg = args.cmd stats_msg = 'Command executed in' elif subcommand == 'copy': arg = args.src dst = args.dst stats_msg = 'File uploaded into' if not os.path.exists(arg): print '%sFile not found: (%s)...Aborting%s' % (RED, arg, NORMAL) sys.exit(1) else: print 'Invalid subcommand (%s)...Aborting' % subcommand sys.exit(1) # Sanity check if getpass.getuser() != 'root': print "Invalid user, you should run this program as root...Aborting" sys.exit(1) # Counter for stats, threads for the job, and a queue for the returned vals11 counter = 0 threads = [] succeed_queue = Queue.Queue() if machine_type in ['cs', 'router']: try: # Import cloudsimd basepath = os.path.abspath(os.path.join(os.path.dirname(__file__), '..')) if not os.path.exists(os.path.join(basepath, 'cloudsimd')): print 'Your DB does not contain any remote CloudSim...Aborting' sys.exit(1) sys.path.insert(0, basepath) from cloudsimd import cloudsimd 
const_list = cloudsimd.list_constellations() except Exception, excep: print ('%sError importing cloudsimd: %s%s' % (RED, repr(excep), NORMAL)) sys.exit(1) # Iterate over the list of CloudSims for constellation in const_list: try: name = constellation['constellation_name'] if name.startswith(CLOUDSIM_PREFIX) and machine_type == 'cs': machine = Ssh_cloudsim(name, constellation) elif name.startswith(CONST_PREFIX) and machine_type == 'router': machine = Ssh_router(name, constellation) else: continue if subcommand == 'run': params = [ssh_options, arg, 'ubuntu', succeed_queue] t = Thread(target=machine.run, args=params) elif subcommand == 'copy': params = [ssh_options, arg, 'ubuntu', dst, succeed_queue] t = Thread(target=machine.copy, args=params) threads.append(t) t.start() counter += 1 except Exception, excep: print ('%sError running command: %s%s' % (RED, repr(excep), NORMAL)) counter -= 1 elif machine_type in ['sim', 'fc1', 'fc2']: try: machine = Ssh_machine(machine_type) if subcommand == 'run': machine.run(ssh_options, arg, 'ubuntu', succeed_queue) elif subcommand == 'copy': machine.copy(ssh_options, arg, 'ubuntu', dst, succeed_queue) counter += 1 except Exception, excep: print repr(excep) sys.exit(1) else: print 'Invalid machine type (%s)...Aborting' % machine_type sys.exit(1) # Wait for all the threads to finish [x.join() for x in threads] # Print some stats for elem in sorted(list(succeed_queue.queue)): print elem print '%s %d machine/s' % (stats_msg, counter) if __name__ == '__main__': # Specify top level command line arguments parser = argparse.ArgumentParser(description='Manage multiple VRC SSH/SCP') parser.add_argument('-o', '--ssh_options', default='', help='ssh options') subparsers = parser.add_subparsers(title='subcommands', description='valid subcommands', help='additional help') # create the parser for the "run" command parser_run = subparsers.add_parser('run', help='Run a command in several machines') parser_run.add_argument('cmd', help='Command to run 
remotely') parser_run.set_defaults(which='run') parser_run.add_argument('type', choices=MACHINES, help='Remote machine type') parser_run.set_defaults(func=go) # create the parser for the "copy" command parser_copy = subparsers.add_parser('copy', help='Copy a file into several machines') parser_copy.add_argument('src', help='File to upload') parser_copy.set_defaults(which='copy') parser_copy.add_argument('dst', help='Destination path') parser_copy.add_argument('type', choices=MACHINES, help='Remote machine type') parser_copy.set_defaults(func=go) # Parse command line arguments args = parser.parse_args() args.func(args)
508
0
105
d4cd314bd47d95820381415300f80a6c264fbc93
10,264
py
Python
sandbox/finetuning/algos/concurrent_continuous_ppo.py
andrewli77/rllab-finetuning
2dae9141d0fdc284d04f18931907131d66b43023
[ "MIT" ]
23
2020-04-27T23:53:44.000Z
2022-03-10T03:13:16.000Z
sandbox/finetuning/algos/concurrent_continuous_ppo.py
WeiChengTseng/rllab-finetuning
2dae9141d0fdc284d04f18931907131d66b43023
[ "MIT" ]
1
2021-11-14T13:30:22.000Z
2021-11-14T13:30:22.000Z
sandbox/finetuning/algos/concurrent_continuous_ppo.py
WeiChengTseng/rllab-finetuning
2dae9141d0fdc284d04f18931907131d66b43023
[ "MIT" ]
8
2020-06-17T03:28:34.000Z
2022-03-09T03:13:03.000Z
import theano import theano.tensor as TT from rllab.misc import ext import numpy as np import copy import rllab.misc.logger as logger from rllab.spaces.box import Box from rllab.envs.env_spec import EnvSpec from sandbox.finetuning.policies.concurrent_hier_policy2 import HierarchicalPolicy from sandbox.finetuning.algos.hier_batch_polopt import BatchPolopt, \ BatchSampler # note that I use my own BatchPolopt class here from sandbox.finetuning.algos.hier_batch_sampler import HierBatchSampler from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer from rllab.distributions.diagonal_gaussian import DiagonalGaussian from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline class ConcurrentContinuousPPO(BatchPolopt): """ Designed to enable concurrent training of a SNN that parameterizes skills and also train the manager at the same time Note that, if I'm not trying to do the sample approximation of the weird log of sum term, I don't need to know which skill was picked, just need to know the action """ # double check this constructor later # initialize the computation graph # optimize is run on >= 1 trajectory at a time # assumptions: 1 trajectory, which is a multiple of p; that the obs_var_probs is valid # do the optimization
48.87619
118
0.667771
import theano import theano.tensor as TT from rllab.misc import ext import numpy as np import copy import rllab.misc.logger as logger from rllab.spaces.box import Box from rllab.envs.env_spec import EnvSpec from sandbox.finetuning.policies.concurrent_hier_policy2 import HierarchicalPolicy from sandbox.finetuning.algos.hier_batch_polopt import BatchPolopt, \ BatchSampler # note that I use my own BatchPolopt class here from sandbox.finetuning.algos.hier_batch_sampler import HierBatchSampler from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer from rllab.distributions.diagonal_gaussian import DiagonalGaussian from rllab.baselines.linear_feature_baseline import LinearFeatureBaseline from rllab.baselines.gaussian_mlp_baseline import GaussianMLPBaseline class ConcurrentContinuousPPO(BatchPolopt): """ Designed to enable concurrent training of a SNN that parameterizes skills and also train the manager at the same time Note that, if I'm not trying to do the sample approximation of the weird log of sum term, I don't need to know which skill was picked, just need to know the action """ # double check this constructor later def __init__(self, optimizer=None, optimizer_args=None, step_size=0.003, num_latents=6, latents=None, # some sort of iterable of the actual latent vectors period=10, # how often I choose a latent truncate_local_is_ratio=None, epsilon=0.1, train_pi_iters=10, use_skill_dependent_baseline=False, mlp_skill_dependent_baseline=False, freeze_manager=False, freeze_skills=False, **kwargs): if optimizer is None: if optimizer_args is None: # optimizer_args = dict() optimizer_args = dict(batch_size=None) self.optimizer = FirstOrderOptimizer(learning_rate=step_size, max_epochs=train_pi_iters, **optimizer_args) self.step_size = step_size self.truncate_local_is_ratio = truncate_local_is_ratio self.epsilon = epsilon super(ConcurrentContinuousPPO, self).__init__(**kwargs) # not sure if this line is correct self.num_latents = kwargs['policy'].latent_dim 
self.latents = latents self.period = period self.freeze_manager = freeze_manager self.freeze_skills = freeze_skills assert (not freeze_manager) or (not freeze_skills) # todo: fix this sampler stuff # import pdb; pdb.set_trace() self.sampler = HierBatchSampler(self, self.period) # self.sampler = BatchSampler(self) # i hope this is right self.diagonal = DiagonalGaussian(self.policy.low_policy.action_space.flat_dim) self.debug_fns = [] assert isinstance(self.policy, HierarchicalPolicy) self.period = self.policy.period assert self.policy.period == self.period self.continuous_latent = self.policy.continuous_latent assert self.continuous_latent # self.old_policy = copy.deepcopy(self.policy) # skill dependent baseline self.use_skill_dependent_baseline = use_skill_dependent_baseline self.mlp_skill_dependent_baseline = mlp_skill_dependent_baseline if use_skill_dependent_baseline: curr_env = kwargs['env'] skill_dependent_action_space = curr_env.action_space new_obs_space_no_bi = curr_env.observation_space.shape[0] + 1 # 1 for the t_remaining skill_dependent_obs_space_dim = (new_obs_space_no_bi * (self.num_latents + 1) + self.num_latents,) skill_dependent_obs_space = Box(-1.0, 1.0, shape=skill_dependent_obs_space_dim) skill_dependent_env_spec = EnvSpec(skill_dependent_obs_space, skill_dependent_action_space) if self.mlp_skill_dependent_baseline: self.skill_dependent_baseline = GaussianMLPBaseline(env_spec=skill_dependent_env_spec) else: self.skill_dependent_baseline = LinearFeatureBaseline(env_spec=skill_dependent_env_spec) # initialize the computation graph # optimize is run on >= 1 trajectory at a time # assumptions: 1 trajectory, which is a multiple of p; that the obs_var_probs is valid def init_opt(self): assert isinstance(self.policy, HierarchicalPolicy) assert not self.freeze_manager and not self.freeze_skills manager_surr_loss = 0 # skill_surr_loss = 0 obs_var_sparse = ext.new_tensor('sparse_obs', ndim=2, dtype=theano.config.floatX) obs_var_raw = ext.new_tensor('obs', 
ndim=3, dtype=theano.config.floatX) # todo: check the dtype action_var = self.env.action_space.new_tensor_variable('action', extra_dims=1, ) advantage_var = ext.new_tensor('advantage', ndim=1, dtype=theano.config.floatX) # latent_var = ext.new_tensor('latents', ndim=2, dtype=theano.config.floatX) mean_var = ext.new_tensor('mean', ndim=2, dtype=theano.config.floatX) log_std_var = ext.new_tensor('log_std', ndim=2, dtype=theano.config.floatX) # undoing the reshape, so that batch sampling is ok obs_var = TT.reshape(obs_var_raw, [obs_var_raw.shape[0] * obs_var_raw.shape[1], obs_var_raw.shape[2]]) ############################################################ ### calculating the skills portion of the surrogate loss ### ############################################################ latent_var_sparse = self.policy.manager.dist_info_sym(obs_var_sparse)['mean'] latent_var = TT.extra_ops.repeat(latent_var_sparse, self.period, axis=0) #.dimshuffle(0, 'x') dist_info_var = self.policy.low_policy.dist_info_sym(obs_var, state_info_var=latent_var) old_dist_info_var = dict(mean=mean_var, log_std=log_std_var) skill_lr = self.diagonal.likelihood_ratio_sym(action_var, old_dist_info_var, dist_info_var) skill_surr_loss_vector = TT.minimum(skill_lr * advantage_var, TT.clip(skill_lr, 1 - self.epsilon, 1 + self.epsilon) * advantage_var) skill_surr_loss = -TT.mean(skill_surr_loss_vector) surr_loss = skill_surr_loss # so that the relative magnitudes are correct if self.freeze_skills and not self.freeze_manager: raise NotImplementedError elif self.freeze_manager and not self.freeze_skills: raise NotImplementedError else: assert (not self.freeze_manager) or (not self.freeze_skills) input_list = [obs_var_raw, obs_var_sparse, action_var, advantage_var, mean_var, log_std_var] self.optimizer.update_opt( loss=surr_loss, target=self.policy, inputs=input_list ) return dict() # do the optimization def optimize_policy(self, itr, samples_data): print(len(samples_data['observations']), self.period) assert 
len(samples_data['observations']) % self.period == 0 # note that I have to do extra preprocessing to the advantages, and also create obs_var_sparse if self.use_skill_dependent_baseline: input_values = tuple(ext.extract( samples_data, "observations", "actions", "advantages", "agent_infos", "skill_advantages")) else: input_values = tuple(ext.extract( samples_data, "observations", "actions", "advantages", "agent_infos")) obs_raw = input_values[0].reshape(input_values[0].shape[0] // self.period, self.period, input_values[0].shape[1]) obs_sparse = input_values[0].take([i for i in range(0, input_values[0].shape[0], self.period)], axis=0) if not self.continuous_latent: advantage_sparse = input_values[2].reshape([input_values[2].shape[0] // self.period, self.period])[:, 0] latents = input_values[3]['latents'] latents_sparse = latents.take([i for i in range(0, latents.shape[0], self.period)], axis=0) prob = np.array( list(input_values[3]['prob'].take([i for i in range(0, latents.shape[0], self.period)], axis=0)), dtype=np.float32) mean = input_values[3]['mean'] log_std = input_values[3]['log_std'] if self.use_skill_dependent_baseline: advantage_var = input_values[4] else: advantage_var = input_values[2] # import ipdb; ipdb.set_trace() if self.freeze_skills and not self.freeze_manager: raise NotImplementedError elif self.freeze_manager and not self.freeze_skills: raise NotImplementedError else: assert (not self.freeze_manager) or (not self.freeze_skills) all_input_values = (obs_raw, obs_sparse, input_values[1], advantage_var, mean, log_std) # todo: assign current parameters to old policy; does this work? 
# old_param_values = self.policy.get_param_values(trainable=True) # self.old_policy.set_param_values(old_param_values, trainable=True) # old_param_values = self.policy.get_param_values() # self.old_policy.set_param_values(old_param_values) loss_before = self.optimizer.loss(all_input_values) self.optimizer.optimize(all_input_values) loss_after = self.optimizer.loss(all_input_values) logger.record_tabular('LossBefore', loss_before) logger.record_tabular('LossAfter', loss_after) logger.record_tabular('dLoss', loss_before - loss_after) return dict() def get_itr_snapshot(self, itr, samples_data): return dict( itr=itr, policy=self.policy, baseline=self.baseline, env=self.env ) def log_diagnostics(self, paths): # paths obtained by self.sampler.obtain_samples BatchPolopt.log_diagnostics(self, paths) # self.sampler.log_diagnostics(paths) # wasn't doing anything anyways # want to log the standard deviations # want to log the max and min of the actions
8,738
0
132
db8d1e3d87eeb6f3a0f5a5bfe9414cd167fe9148
1,064
py
Python
test/perspective.py
cburggie/py3D
d0fe21aae1d5ab77ec80f85533e941c902dad6f4
[ "MIT" ]
null
null
null
test/perspective.py
cburggie/py3D
d0fe21aae1d5ab77ec80f85533e941c902dad6f4
[ "MIT" ]
null
null
null
test/perspective.py
cburggie/py3D
d0fe21aae1d5ab77ec80f85533e941c902dad6f4
[ "MIT" ]
null
null
null
import pytrace import py3D filename = 'perspective.png' ppu = 400 passes = 16 S = py3D.Sphere C = py3D.Color V = py3D.Vector cam = pytrace.Camera(V(0.0,10.0,0.0), V(0.0,0.0,0.0), 2.0, 2.0, V(0.0,0.0,1.0)) cam.set_ppu(ppu) center = S(V(0.0,0.0,0.0), 1.0, C(0.99,0.99,0.99)).set_reflectivity(0.8) bodies = [] bodies.append( S(V(0.0,0.0,3.0), 1.0, C(0.01,0.01,0.01)) ) bodies.append( S(V(3.0,0.0,3.0).norm().scale(3.0), 1.0, C(0.99,0.01,0.01)) ) bodies.append( S(V(3.0,0.0,0.0), 1.0, C(0.99,0.99,0.01)) ) bodies.append( S(V(3.0,0.0,-3.0).norm().scale(3.0), 1.0, C(0.01,0.99,0.01)) ) bodies.append( S(V(0.0,0.0,-3.0), 1.0, C(0.01,0.99,0.99)) ) bodies.append( S(V(-3.0,0.0,-3.0).norm().scale(3.0), 1.0, C(0.01,0.01,0.99)) ) bodies.append( S(V(-3.0,0.0,0.0), 1.0, C(0.99,0.01,0.99)) ) bodies.append( S(V(-3.0,0.0,3.0).norm().scale(3.0), 1.0, C(0.99,0.99,0.99)) ) for b in bodies: b.set_reflectivity(0.4) bodies.append(center) world = pytrace.World(bodies, py3D.Sky(V(0.0,1.0,0.0), C(0.01,0.01,0.3)) ) pytrace.Tracer(world, cam).draw(passes).write(filename)
30.4
79
0.596805
import pytrace import py3D filename = 'perspective.png' ppu = 400 passes = 16 S = py3D.Sphere C = py3D.Color V = py3D.Vector cam = pytrace.Camera(V(0.0,10.0,0.0), V(0.0,0.0,0.0), 2.0, 2.0, V(0.0,0.0,1.0)) cam.set_ppu(ppu) center = S(V(0.0,0.0,0.0), 1.0, C(0.99,0.99,0.99)).set_reflectivity(0.8) bodies = [] bodies.append( S(V(0.0,0.0,3.0), 1.0, C(0.01,0.01,0.01)) ) bodies.append( S(V(3.0,0.0,3.0).norm().scale(3.0), 1.0, C(0.99,0.01,0.01)) ) bodies.append( S(V(3.0,0.0,0.0), 1.0, C(0.99,0.99,0.01)) ) bodies.append( S(V(3.0,0.0,-3.0).norm().scale(3.0), 1.0, C(0.01,0.99,0.01)) ) bodies.append( S(V(0.0,0.0,-3.0), 1.0, C(0.01,0.99,0.99)) ) bodies.append( S(V(-3.0,0.0,-3.0).norm().scale(3.0), 1.0, C(0.01,0.01,0.99)) ) bodies.append( S(V(-3.0,0.0,0.0), 1.0, C(0.99,0.01,0.99)) ) bodies.append( S(V(-3.0,0.0,3.0).norm().scale(3.0), 1.0, C(0.99,0.99,0.99)) ) for b in bodies: b.set_reflectivity(0.4) bodies.append(center) world = pytrace.World(bodies, py3D.Sky(V(0.0,1.0,0.0), C(0.01,0.01,0.3)) ) pytrace.Tracer(world, cam).draw(passes).write(filename)
0
0
0
7d9b7841c0014afe5edcc26667fa135680bc684d
6,061
py
Python
flask_app/app.py
bulatcute/tatfac
1bbfcff2622dd11a8caf69ff17268156720bb2c1
[ "MIT" ]
null
null
null
flask_app/app.py
bulatcute/tatfac
1bbfcff2622dd11a8caf69ff17268156720bb2c1
[ "MIT" ]
null
null
null
flask_app/app.py
bulatcute/tatfac
1bbfcff2622dd11a8caf69ff17268156720bb2c1
[ "MIT" ]
null
null
null
import asyncio import atexit import os import dotenv import requests from apscheduler.schedulers.background import BackgroundScheduler from flask import (Flask, make_response, redirect, render_template, request, send_from_directory, url_for) from flask_login import ( LoginManager, UserMixin, current_user, login_required, login_user, logout_user) from flask_sqlalchemy import SQLAlchemy from flask_app.edutatar import (check_login, facultative_info, get_diary, get_home_params, login_edu, my_facultatives, my_stars) from flask_app.forms import LoginForm dotenv.load_dotenv() application = Flask(__name__) application.config['SECRET_KEY'] = os.environ['SECRET_KEY'] application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # region DATABASE SETUP application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' db = SQLAlchemy(application) db.create_all() # endregion # region LOGIN SETUP login_manager = LoginManager() login_manager.init_app(application) @login_manager.user_loader @login_manager.unauthorized_handler sessions = {} for user in User.query.all(): s = requests.Session() login_edu(s, user.login, user.password) sessions[user.login] = s scheduler = BackgroundScheduler() scheduler.add_job(func=reload_sessions, trigger='interval', seconds=900) scheduler.start() atexit.register(lambda: scheduler.shutdown()) # endregion @application.route('/sw.js', methods=['GET']) @application.route('/', methods=['GET']) @login_required @application.route('/marks', methods=['GET']) @login_required @application.route('/marks/<int:term>', methods=['GET']) @login_required @application.route('/facultatives', methods=['GET']) @login_required @application.route('/facultative/<int:index>') @login_required @application.route('/diary') @login_required @application.route('/diary/<int:date>') @login_required @application.route('/login', methods=['GET', 'POST']) @application.route('/logout') @login_required
31.082051
107
0.671176
import asyncio import atexit import os import dotenv import requests from apscheduler.schedulers.background import BackgroundScheduler from flask import (Flask, make_response, redirect, render_template, request, send_from_directory, url_for) from flask_login import ( LoginManager, UserMixin, current_user, login_required, login_user, logout_user) from flask_sqlalchemy import SQLAlchemy from flask_app.edutatar import (check_login, facultative_info, get_diary, get_home_params, login_edu, my_facultatives, my_stars) from flask_app.forms import LoginForm dotenv.load_dotenv() application = Flask(__name__) application.config['SECRET_KEY'] = os.environ['SECRET_KEY'] application.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True # region DATABASE SETUP application.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///database.db' db = SQLAlchemy(application) class User(UserMixin, db.Model): id = db.Column(db.Integer, primary_key=True) login = db.Column(db.String, nullable=False, unique=True) avatar = db.Column(db.String) name = db.Column(db.String, nullable=False) password = db.Column(db.String, nullable=False) db.create_all() # endregion # region LOGIN SETUP login_manager = LoginManager() login_manager.init_app(application) @login_manager.user_loader def load_user(user_id): return User.query.get(user_id) @login_manager.unauthorized_handler def unauthorized_callback(): return redirect('/login?next=' + request.path) sessions = {} for user in User.query.all(): s = requests.Session() login_edu(s, user.login, user.password) sessions[user.login] = s def reload_sessions(): print('sessions reloading') for user in User.query.all(): s = requests.Session() login_edu(s, user.login, user.password) sessions[user.login] = s scheduler = BackgroundScheduler() scheduler.add_job(func=reload_sessions, trigger='interval', seconds=900) scheduler.start() atexit.register(lambda: scheduler.shutdown()) # endregion @application.route('/sw.js', methods=['GET']) def sw(): response = 
make_response(send_from_directory('static', 'sw.js')) response.headers['Cache-Control'] = 'no-cache' response.headers['Content-Type'] = 'application/javascript' return response @application.route('/', methods=['GET']) @login_required def index(): return redirect(url_for('diary')) @application.route('/marks', methods=['GET']) @login_required def marks(): data = {'login': current_user.login, 'name': current_user.name, 'avatar': current_user.avatar} stars = my_stars(sessions[current_user.login]) return render_template('marks.html', data=data, stars=stars[0], term=stars[1]) @application.route('/marks/<int:term>', methods=['GET']) @login_required def marks_with_term(term): data = {'login': current_user.login, 'name': current_user.name, 'avatar': current_user.avatar} stars = my_stars(sessions[current_user.login], term=term) return render_template('marks.html', data=data, stars=stars[0], term=str(stars[1])) @application.route('/facultatives', methods=['GET']) @login_required def facultatives(): data = {'login': current_user.login, 'name': current_user.name, 'avatar': current_user.avatar} facs = my_facultatives(sessions[current_user.login]) return render_template('facultatives.html', data=data, facs=facs) @application.route('/facultative/<int:index>') @login_required def facultative(index): data = {'login': current_user.login, 'name': current_user.name, 'avatar': current_user.avatar} info = facultative_info(sessions[current_user.login], index=index) return render_template('facultative.html', data=data, info=info) @application.route('/diary') @login_required def diary(): data = {'login': current_user.login, 'name': current_user.name, 'avatar': current_user.avatar} diary = get_diary(sessions[current_user.login]) return render_template('diary.html', data=data, diary=diary[0], next_page=diary[1], prev_page=diary[2]) @application.route('/diary/<int:date>') @login_required def diary_with_date(date): data = {'login': current_user.login, 'name': current_user.name, 'avatar': 
current_user.avatar} diary = get_diary( sessions[current_user.login], url=f'https://edu.tatar.ru/user/diary/week?date={date}') return render_template('diary.html', data=data, diary=diary[0], next_page=diary[1], prev_page=diary[2]) @application.route('/login', methods=['GET', 'POST']) def login(): form = LoginForm() if form.validate_on_submit(): login = form.login.data password = form.password.data s = requests.Session() login_edu(s, login, password) code = check_login(s) next = request.args.get('next') if code: user = User.query.filter_by(login=form.login.data).first() if not user: user_data = get_home_params(s) if not user_data['avatar']: user_data['avatar'] = url_for( 'static', filename='img/grayman.png') new_user = User( login=form.login.data, password=password, name=user_data[ 'name'], avatar=user_data['avatar'] ) db.session.add(new_user) db.session.commit() login_user(new_user, remember=True) login_edu(s, login, password) sessions[new_user.login] = s else: login_user(user, remember=True) return redirect(next or url_for('index')) else: return render_template('auth/login.html', form=form, message="Неправильный логин или пароль") return render_template('auth/login.html', form=form) @application.route('/logout') @login_required def logout(): logout_user() return redirect(url_for('login'))
3,445
256
310
b7db7352d4a55e0347fe2a602885c256089ac79d
62
py
Python
Clase1/HolaNombre.py
JoseCordobaEAN/EstructurasDeDatosUE4P
86a5c426d83d9d9ae86656c3c78324a1c07f608d
[ "MIT" ]
2
2019-08-17T21:15:47.000Z
2019-09-21T12:15:19.000Z
Clase1/HolaNombre.py
JoseCordobaEAN/EstructurasDeDatosUE4P
86a5c426d83d9d9ae86656c3c78324a1c07f608d
[ "MIT" ]
null
null
null
Clase1/HolaNombre.py
JoseCordobaEAN/EstructurasDeDatosUE4P
86a5c426d83d9d9ae86656c3c78324a1c07f608d
[ "MIT" ]
null
null
null
nombre = input("¿Cual es tu nombre?") print(f'Hola {nombre}')
20.666667
37
0.66129
nombre = input("¿Cual es tu nombre?") print(f'Hola {nombre}')
0
0
0
011f123996b9652d7897969a4b6e12700739981a
765
py
Python
main/MouseAtlas.py
jimmayxu/scVI
23cb7597e0b0677736a14c903c8053ad62c74938
[ "MIT" ]
null
null
null
main/MouseAtlas.py
jimmayxu/scVI
23cb7597e0b0677736a14c903c8053ad62c74938
[ "MIT" ]
null
null
null
main/MouseAtlas.py
jimmayxu/scVI
23cb7597e0b0677736a14c903c8053ad62c74938
[ "MIT" ]
null
null
null
import os os.getcwd() import sys sys.path.append('scvi') import torch import numpy as np import pandas as pd import scanpy as sc save_path = "/lustre/scratch117/cellgen/team205/tpcg/backup/backup_20190401/sc_sclassification/CellTypist/data_repo/MouseAtlas/MouseAtlas.total.h5ad" save_path2 = "/lustre/scratch117/cellgen/team205/tpcg/human_data/HumanAtlas.h5ad" adata_mouse = sc.read_h5ad(save_path) adata_human = sc.read_h5ad(save_path2) # sc.read_10x_mtx save_path = "/lustre/scratch117/cellgen/team205/zx3/pooled_2019-03-21" adata = sc.read_10x_mtx(save_path) import scvi from dataset.anndata import AnnDataset from inference import UnsupervisedTrainer from models.vae import VAE from typing import Tuple import scvi from models.modules import Encoder
22.5
149
0.816993
import os os.getcwd() import sys sys.path.append('scvi') import torch import numpy as np import pandas as pd import scanpy as sc save_path = "/lustre/scratch117/cellgen/team205/tpcg/backup/backup_20190401/sc_sclassification/CellTypist/data_repo/MouseAtlas/MouseAtlas.total.h5ad" save_path2 = "/lustre/scratch117/cellgen/team205/tpcg/human_data/HumanAtlas.h5ad" adata_mouse = sc.read_h5ad(save_path) adata_human = sc.read_h5ad(save_path2) # sc.read_10x_mtx save_path = "/lustre/scratch117/cellgen/team205/zx3/pooled_2019-03-21" adata = sc.read_10x_mtx(save_path) import scvi from dataset.anndata import AnnDataset from inference import UnsupervisedTrainer from models.vae import VAE from typing import Tuple import scvi from models.modules import Encoder
0
0
0
c7f59edb3a5c8d3619ad68f6b6967bea28d3ba40
3,792
py
Python
shell/shell.py
kreusada/JackCogs
f1c5b30e0656cb9634fa8bb0d6b00aa7c2384c40
[ "Apache-2.0" ]
null
null
null
shell/shell.py
kreusada/JackCogs
f1c5b30e0656cb9634fa8bb0d6b00aa7c2384c40
[ "Apache-2.0" ]
null
null
null
shell/shell.py
kreusada/JackCogs
f1c5b30e0656cb9634fa8bb0d6b00aa7c2384c40
[ "Apache-2.0" ]
null
null
null
# Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import asyncio.subprocess as asp import contextlib from typing import Any, Dict, List, Literal from redbot.core import commands from redbot.core.bot import Red from .errors import ProcessTerminatedEarly from .utils import get_env, send_pages, wait_for_result RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"] class Shell(commands.Cog): """Run shell commands on bot's system from Discord.""" @commands.is_owner() @commands.command() async def shell(self, ctx: commands.Context, *, command: str) -> None: """Run shell command.""" await self._shell_command(ctx, command) @commands.is_owner() @commands.command() async def shellq(self, ctx: commands.Context, *, command: str) -> None: """ Run shell command quietly. If command's exit code is 0, `[p]shellq` will only send a tick reaction. Otherwise, the result will be shown as with regular `[p]shell` command. """ await self._shell_command(ctx, command, send_message_on_success=False) @commands.is_owner() @commands.command() async def killshells(self, ctx: commands.Context) -> None: """Kill all shell processes started by Shell cog.""" async with self._killing_lock: for p in reversed(self.active_processes): # in case some Process is still here after it terminated if p.returncode is None: p.kill() self.active_processes.pop() await ctx.send("Killed all active shell processes.")
35.111111
86
0.629483
# Copyright 2018-2021 Jakub Kuczys (https://github.com/jack1142) # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import asyncio import asyncio.subprocess as asp import contextlib from typing import Any, Dict, List, Literal from redbot.core import commands from redbot.core.bot import Red from .errors import ProcessTerminatedEarly from .utils import get_env, send_pages, wait_for_result RequestType = Literal["discord_deleted_user", "owner", "user", "user_strict"] class Shell(commands.Cog): """Run shell commands on bot's system from Discord.""" def __init__(self, bot: Red) -> None: self.bot = bot self.active_processes: List[asp.Process] = [] self._killing_lock = asyncio.Lock() async def red_get_data_for_user(self, *, user_id: int) -> Dict[str, Any]: # this cog does not story any data return {} async def red_delete_data_for_user( self, *, requester: RequestType, user_id: int ) -> None: # this cog does not story any data pass @commands.is_owner() @commands.command() async def shell(self, ctx: commands.Context, *, command: str) -> None: """Run shell command.""" await self._shell_command(ctx, command) @commands.is_owner() @commands.command() async def shellq(self, ctx: commands.Context, *, command: str) -> None: """ Run shell command quietly. If command's exit code is 0, `[p]shellq` will only send a tick reaction. Otherwise, the result will be shown as with regular `[p]shell` command. 
""" await self._shell_command(ctx, command, send_message_on_success=False) async def _shell_command( self, ctx: commands.Context, command: str, *, send_message_on_success: bool = True, ) -> None: async with ctx.typing(): async with self._killing_lock: p = await asp.create_subprocess_shell( command, stdout=asp.PIPE, stderr=asp.STDOUT, env=get_env() ) self.active_processes.append(p) try: output = await wait_for_result(p) except ProcessTerminatedEarly as e: output = e.partial_output prefix = ( "**Command was terminated early and this is a partial output.**\n" ) else: prefix = "" async with self._killing_lock: with contextlib.suppress(ValueError): self.active_processes.remove(p) if not send_message_on_success and p.returncode == 0: await ctx.tick() else: await send_pages(ctx, command=command, output=output, prefix=prefix) @commands.is_owner() @commands.command() async def killshells(self, ctx: commands.Context) -> None: """Kill all shell processes started by Shell cog.""" async with self._killing_lock: for p in reversed(self.active_processes): # in case some Process is still here after it terminated if p.returncode is None: p.kill() self.active_processes.pop() await ctx.send("Killed all active shell processes.")
1,481
0
108
1aca8312e8b3a002362d2b49a0e848391cf47680
2,032
py
Python
markov.py
AVMatthews/Outlet
c84765ef1288d206f09cdace5841d36192782875
[ "CC0-1.0" ]
null
null
null
markov.py
AVMatthews/Outlet
c84765ef1288d206f09cdace5841d36192782875
[ "CC0-1.0" ]
null
null
null
markov.py
AVMatthews/Outlet
c84765ef1288d206f09cdace5841d36192782875
[ "CC0-1.0" ]
null
null
null
import re; import random import sys # splitter = re.compile('.', re.MULTILINE) if __name__ == '__main__': k = int(sys.argv[1]) n = int(sys.argv[2]) ma = Markov(n) for i in sys.argv[3:]: ma.Feed(open(i, 'r').read()) #print ma.probs #print ma.Dump(int(sys.argv[1])) words, hits = ma.Generate(k, random.choice) print ' '.join(words) #print ''.join(words) print>>sys.stderr, hits, 'hits', (100*float(hits)/k), '%'
32.774194
79
0.468012
import re; import random import sys class Markov(object): splitter = re.compile(r'([\w\'!\?\.,<>="\'/:]+)') # splitter = re.compile('.', re.MULTILINE) def __init__(self, n = 1): self.probs = {} self.n = n def Feed(self, str): self.Update(map(lambda x: x.lower(), self.splitter.findall(str))) #self.Update(map(lambda x: x.lower(), str)) def Update(self, words): n = self.n if len(words) < 2: return for idx in range(len(words)): for i in range(1, n + 1): if idx < i: continue vector = tuple(words[idx - i:idx]) self.probs.setdefault(vector, []).append(words[idx]) def Generate(self, k, choicef): all_wls = self.probs.values() word = choicef(choicef(all_wls)) result = [None]*k hits = 0 for i in range(k): result[i] = word if i < self.n: word = choicef(choicef(all_wls)) else: vector = tuple(result[i - self.n + 1:i + 1]) while vector: wl = self.probs.get(vector, []) if wl: hits += 1 word = choicef(wl) #print 'prev:', vector, 'words:', len(wl), 'word', word break else: vector = vector[1:] else: word = choicef(choicef(all_wls)) return result, hits def Dump(self, k): return ' '.join(self.Generate(k, random.choice)[0]) if __name__ == '__main__': k = int(sys.argv[1]) n = int(sys.argv[2]) ma = Markov(n) for i in sys.argv[3:]: ma.Feed(open(i, 'r').read()) #print ma.probs #print ma.Dump(int(sys.argv[1])) words, hits = ma.Generate(k, random.choice) print ' '.join(words) #print ''.join(words) print>>sys.stderr, hits, 'hits', (100*float(hits)/k), '%'
1,363
54
153
d05ba470162792a18c8b6797afd697c420796a34
3,882
py
Python
batchflow/models/tf/pyramidnet.py
bestetc/batchflow
d2a843640383fbe860654236881483f755227e06
[ "Apache-2.0" ]
87
2018-11-16T08:04:12.000Z
2022-03-24T20:08:44.000Z
batchflow/models/tf/pyramidnet.py
bestetc/batchflow
d2a843640383fbe860654236881483f755227e06
[ "Apache-2.0" ]
243
2018-11-29T02:03:55.000Z
2022-02-21T08:28:29.000Z
batchflow/models/tf/pyramidnet.py
bestetc/batchflow
d2a843640383fbe860654236881483f755227e06
[ "Apache-2.0" ]
35
2019-01-29T14:26:14.000Z
2021-12-30T01:39:02.000Z
""" Dongyoon Han et al. "`Deep Pyramidal Residual Networks <https://arxiv.org/abs/1610.02915>`_" """ from . import ResNet class PyramidNet(ResNet): """ The base PyramidNet model Notes ----- This class is intended to define custom PyramidNets. For more convenience use predefined :class:`.tf.PyramidNet18`, :class:`.tf.PyramidNet34`, and others described down below. **Configuration** inputs : dict dict with 'images' and 'labels' (see :meth:`~.TFModel._make_inputs`) initial_block : dict parameters for the initial block (see :func:`.conv_block`). body : dict num_blocks : list of int number of blocks in each group with the same number of filters. block : dict widening : int an increment of filters number in each block (default=8) and other :class:`~.tf.ResNet` block params head : dict 'Vdf' with dropout_rate=.4 Notes ----- Also see :class:`~.TFModel` and :class:`~.tf.ResNet` configuration. """ @classmethod def default_config(cls): """ Define model defaults. See :meth: `~.TFModel.default_config` """ config = ResNet.default_config() config['body/block/widening'] = 8 config['body/block/zero_pad'] = True return config @classmethod def default_layout(cls, bottleneck, **kwargs): """ Define conv block layout """ _ = kwargs return 'nc nac nac n' if bottleneck else 'nc nac n' def build_config(self, names=None): """ Define model's architecture configuration. 
See :meth: `~.TFModel.build_config` """ config = super(ResNet, self).build_config(names) if config.get('body/filters') is None: w = config['body/block/widening'] filters = config['initial_block/filters'] config['body/filters'] = [] for g in config['body/num_blocks']: bfilters = [filters + w * b for b in range(1, g + 1)] filters = bfilters[-1] config['body/filters'].append(bfilters) if config.get('head/units') is None: config['head/units'] = self.num_classes('targets') if config.get('head/filters') is None: config['head/filters'] = self.num_classes('targets') return config class PyramidNet18(PyramidNet): """ 18-layer PyramidNet architecture """ @classmethod class PyramidNet34(PyramidNet): """ 34-layer PyramidNet architecture """ @classmethod class PyramidNet50(PyramidNet): """ 50-layer PyramidNet architecture with bottleneck blocks """ @classmethod class PyramidNet101(PyramidNet): """ 101-layer PyramidNet architecture with bottleneck blocks """ @classmethod class PyramidNet152(PyramidNet): """ 152-layer PyramidNet architecture with bottleneck blocks """ @classmethod
31.056
94
0.617723
""" Dongyoon Han et al. "`Deep Pyramidal Residual Networks <https://arxiv.org/abs/1610.02915>`_" """ from . import ResNet class PyramidNet(ResNet): """ The base PyramidNet model Notes ----- This class is intended to define custom PyramidNets. For more convenience use predefined :class:`.tf.PyramidNet18`, :class:`.tf.PyramidNet34`, and others described down below. **Configuration** inputs : dict dict with 'images' and 'labels' (see :meth:`~.TFModel._make_inputs`) initial_block : dict parameters for the initial block (see :func:`.conv_block`). body : dict num_blocks : list of int number of blocks in each group with the same number of filters. block : dict widening : int an increment of filters number in each block (default=8) and other :class:`~.tf.ResNet` block params head : dict 'Vdf' with dropout_rate=.4 Notes ----- Also see :class:`~.TFModel` and :class:`~.tf.ResNet` configuration. """ @classmethod def default_config(cls): """ Define model defaults. See :meth: `~.TFModel.default_config` """ config = ResNet.default_config() config['body/block/widening'] = 8 config['body/block/zero_pad'] = True return config @classmethod def default_layout(cls, bottleneck, **kwargs): """ Define conv block layout """ _ = kwargs return 'nc nac nac n' if bottleneck else 'nc nac n' def build_config(self, names=None): """ Define model's architecture configuration. 
See :meth: `~.TFModel.build_config` """ config = super(ResNet, self).build_config(names) if config.get('body/filters') is None: w = config['body/block/widening'] filters = config['initial_block/filters'] config['body/filters'] = [] for g in config['body/num_blocks']: bfilters = [filters + w * b for b in range(1, g + 1)] filters = bfilters[-1] config['body/filters'].append(bfilters) if config.get('head/units') is None: config['head/units'] = self.num_classes('targets') if config.get('head/filters') is None: config['head/filters'] = self.num_classes('targets') return config class PyramidNet18(PyramidNet): """ 18-layer PyramidNet architecture """ @classmethod def default_config(cls): config = PyramidNet.default_config() config['body/num_blocks'] = [2, 2, 2, 2] config['body/block/bottleneck'] = False return config class PyramidNet34(PyramidNet): """ 34-layer PyramidNet architecture """ @classmethod def default_config(cls): config = PyramidNet.default_config() config['body/num_blocks'] = [3, 4, 6, 3] config['body/block/bottleneck'] = False return config class PyramidNet50(PyramidNet): """ 50-layer PyramidNet architecture with bottleneck blocks """ @classmethod def default_config(cls): config = PyramidNet.default_config() config['body/num_blocks'] = [3, 4, 6, 3] config['body/block/bottleneck'] = True return config class PyramidNet101(PyramidNet): """ 101-layer PyramidNet architecture with bottleneck blocks """ @classmethod def default_config(cls): config = PyramidNet.default_config() config['body/num_blocks'] = [3, 4, 23, 3] config['body/block/bottleneck'] = True return config class PyramidNet152(PyramidNet): """ 152-layer PyramidNet architecture with bottleneck blocks """ @classmethod def default_config(cls): config = PyramidNet.default_config() config['body/num_blocks'] = [3, 8, 36, 3] config['body/block/bottleneck'] = True return config
834
0
130
814151f26dafa0c3a95ca7fedafaed5b2414e027
18,627
py
Python
MiNTiF_Utils/cnn/model_zoos/model_zoo_default.py
CAiM-lab/MiNTiF
b292670d3cc97f884fa02fb60229869df59fccbc
[ "MIT" ]
2
2021-02-25T10:22:10.000Z
2022-02-09T12:42:31.000Z
MiNTiF_Utils/cnn/model_zoos/model_zoo_default.py
CAiM-lab/MiNTiF
b292670d3cc97f884fa02fb60229869df59fccbc
[ "MIT" ]
null
null
null
MiNTiF_Utils/cnn/model_zoos/model_zoo_default.py
CAiM-lab/MiNTiF
b292670d3cc97f884fa02fb60229869df59fccbc
[ "MIT" ]
2
2021-09-22T12:56:18.000Z
2021-11-29T18:22:03.000Z
# (c) 2019-2021, Alvaro Gomariz @ ETH Zurich # Computer-assisted Applications in Medicine (CAiM) Group, Prof. Orcun Goksel import numpy as np import tensorflow as tf from cnn import layers_keras as clayers, losses, metrics_keras as cmetrics import logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) class CustomModel: """ Base class to build other models on top """ optimizer = tf.keras.optimizers.Adam() # optimizer = tf.keras.optimizers.Adam(0.000001) activation = 'linear' nlabels = 0 loss_name = 'crossentropy' loss_weights = None att_minput = ('att_domchcomb', 'att_chcomb', 'att_domchcomb_cust1', 'att_domchcomb_sm', 'att_domchcomb_cust1sm', 'att_domchcomb_cust2') name_metrictrack = "Fscore_labelmean" def __init__(self, msettings, class_weights=None, name='CustomModel', **kwargs): """ Class initialization Parameters ---------- msettings : dict dictionary describing the model settings. Normally loaded from an external .json file class_weights : list or None list of floats indicating the weight for each of the classes name: str name assigned to identify the created model """ # Data settings self.name = name self.msettings = msettings self.msettings.update({ 'activation': self.activation, 'nchannels': len(msettings['channels']), 'nlabels': len(msettings['labels']) + 1 }) self.class_weights = class_weights self.patch_size = msettings['patch_size'] self.patch_size_out = msettings['patch_size_out'] self.nchannels = len(msettings['channels']) self.input_aux = tf.keras.Input(shape=self.patch_size + [self.nchannels], name="input") self.input_shape_aux = self.input_aux.shape # self.input_shape = self.patch_size + [self.nchannels] # Network settings if 'nnNodes' in msettings: self.nn_nodes = np.array(msettings['nnNodes']) else: self.nn_nodes = np.array([[64, 128, 256, 512, 1024], [1024, 512, 256, 128, 64]]) self.do_batch_norm = msettings['batchnorm'] self.padding = msettings['padding'] # Use extra label in segmentation (for bg) extra_labels = 0 if ( 
('dataset_type' in self.msettings) and self.msettings['dataset_type'] in ('detection')) else 1 self.nlabels = len(msettings['labels']) + extra_labels self.scale_factor = msettings['scale_factor'] self.ksize = msettings['kernel_size'] self.nlevels = msettings['nlevels'] # Graph settings self._graph_built = False self._model_built = False # self.run_eagerly = system.eager_mode # if self.run_eagerly: # logger.warning("The model is running in eager mode, so it will be slower") if not (self.activation in ('softmax', 'sigmoid')): self.is_logits = True def get_loss(self): """ Parses the string in self.loss_name to assign the proper loss Returns ------- class:`tf.losses.Loss` loss function for the model """ if self.loss_name == 'dice': return losses.DiceLoss(mode='macro_robust', only_foreground=True) elif self.loss_name == 'crossentropy': if self.class_weights is None: self.class_weights = [1] * self.nlabels logger.warning("class_weights should be declared for weighted cross entropy") return losses.CrossEntropyWeighted(self.class_weights) else: raise Exception("The loss {} has not been implemented".format(self.loss_name)) def model_fromlayer(self, nlayer, name=None): """ Create a model from a specific layer Parameters ---------- nlayer : int or str layer number or name that can be recognized within the model name str or None name assigned to the newly created model Returns ------- class:`tf.keras.Model` newly created model """ m_aux = self.build_model() if isinstance(nlayer, int): layer = m_aux.layers[nlayer] elif isinstance(nlayer, str): layer = m_aux.get_layer(nlayer) else: raise Exception("The layer type was not recognized") return tf.keras.Model(inputs=self.input_aux, outputs=layer.output, name=name) def build_model(self, name=None): """ Created a keras model from the layers stablished in this class Parameters ---------- name str or None name of the model Returns ------- class:`tf.keras.Model` keras model """ name = name or self.name model = 
tf.keras.Model(self.input_aux, self.call(self.input_aux), name=name) return model def build_metrics(self, do_idclass=False, **kwargs): """ Builds the custom metrics required for the defined mdoel Parameters ---------- do_idclass: bool Indicates if classes are treated separately (True) or aggregated together (False) Returns ------- class:`tf.keras.metrics.Metric` Metrics for the model """ return cmetrics.get_confmetrics(self.nlabels, self.is_logits, do_idclass=do_idclass) def get_vislayers(self, model=None, names=None): """ Defines the layers that will be visualized in tensorboard Parameters ---------- model: class:`tf.keras.Model` or None model from which the visualizations are to be obtained names: list (of str) or None names of the layers to be visualized Returns ------- class:`tf.keras.Model` Keras model that outputs the different defined layers """ if names: layers = [layer.output for layer in model.layers if not isinstance(layer.output, list) and any([n in layer.name for n in names])] else: layers = [layer.output for layer in model.layers if not isinstance(layer.output, list) and len(layer.output.shape) > 3] try: mlayers = tf.keras.Model(inputs=model.input, outputs=layers) except ValueError: logger.warning("Layers could not be obtained for visualization") mlayers = None return mlayers class UNet(CustomModel): """ Custom 2D U-Net model References ---------- .. 
[1] Olaf Ronneberger, Philipp Fischer, Thomas Brox, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015 """ def __init__(self, nodes_factor=2, do_outlayer=True, channel_gather=None, chdrop=None, drop_rate=0.5, do_att=None, pos_att=None, nn_nodes=None, ndims=2, unsupervised_settings=None, **kwargs): """ Class initialization Parameters ---------- nodes_factor : int Reduce the number of nodes in every layer by the factor indicated here do_outlayer : bool Employ a last layer that outputs the classes (True) or output logits (False) channel_gather : int or None If int, the channel corresponding to that id will be employed by the network chdrop : str or None If str, defines the type of MarkerSampling employed as defined in `cnn.layers_keras.lchdrop` drop_rate : float If chdrop is True, this indicated the sampling ratio do_att : str or None If str, defines the type of attention employed as defined in `cnn.layers_keras.get_attention_layer` pos_att : list (of strs) or None It indicates the parts of the UNet where attention is employed. If None, it is used in ('encoder', 'bottleneck', 'decoder') nn_nodes : list (of lists (of ints)) or None Defines the number of nodes in every layer of the model. 
kwargs : key, value mappings Other keyword arguments are passed through to class:`CustomModel` """ super().__init__(**kwargs) self.do_outlayer = do_outlayer if nn_nodes is not None: self.nn_nodes = nn_nodes else: self.nn_nodes = self.nn_nodes // nodes_factor if channel_gather is not None: if chdrop: raise Exception("chdrop and channel_gather cannot be used together") self.do_channelgather = True self.channelgather = clayers.ChannelGather(channel_gather) chname = "BE" + str(channel_gather) + "_" else: self.do_channelgather = False chname = "" self.chdrop = chdrop self.chdrop_multi = False self.drop_rate = drop_rate self.ndims = ndims if self.chdrop is not None: self.chdrop_multi = 'chcomb' in self.chdrop self.drop_channels = clayers.lchdrop(self.chdrop, drop_rate) if self.chdrop == 'att_chcomb': self.drop_channels_att = clayers.ChCombAtt(name="ChDrop_auxAtt") self.encoder = [None] * (self.nlevels) self.encoder_skip = [None] * (self.nlevels - 1) self.decoder = [None] * (self.nlevels - 1) self.crop_encoder = [None] * (self.nlevels - 1) self.concat = [tf.keras.layers.Concatenate() for _ in range(self.nlevels - 1)] if ndims == 2: self.down = [ tf.keras.layers.MaxPool2D(self.scale_factor, padding=self.padding, name=chname + "downsample_level" + str(l)) for l in range(self.nlevels - 1)] elif ndims == 3: self.down = [ tf.keras.layers.MaxPool3D(self.scale_factor, padding=self.padding, name=chname + "downsample_level" + str(l)) for l in range(self.nlevels - 1)] else: logger.error("Method not implemented for {} dimensions".format(ndims)) self.up = [clayers.upsampling_custom( nodes=self.nn_nodes[1][l + 1], scale_factor=self.scale_factor, ndims=ndims, name=chname + "upsample_level" + str(l) ) for l in range(self.nlevels - 1)] self.do_att = do_att self.act2 = None if self.do_att == 'att_chcomb_postact' else 'relu' if self.do_att is None: self.pos_att = {k: False for k in ('encoder', 'bottleneck', 'decoder')} self.att_multi = False else: self.pos_att = pos_att or {k: True for k 
in ('encoder', 'bottleneck', 'decoder')} self.att_multi = 'chcomb' in self.do_att fatt = clayers.get_attention_layer(self.do_att) # self.latt = [[ # fatt("SE_" + net + str(nl)) for nl in range(self.nlevels) # ] for laux, net in zip((self.nlevels, self.nlevels - 1), ('e', 'd'))] self.latt_enc = [ fatt("SE_enc_l" + str(nl)) for nl in range(self.nlevels - 1) ] if self.pos_att['encoder'] else None self.latt_dec = [ fatt("SE_dec_l" + str(nl)) for nl in range(self.nlevels - 1) ] if self.pos_att['decoder'] else None self.latt_bottleneck = fatt("SE_bn") if self.pos_att['bottleneck'] else None self.encoder[0] = clayers.blockconv(self.nn_nodes[0][0], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, ndims=ndims, name=chname + "BlockDown_level0", rdrop=float(self.msettings["dropout"])) for level in range(1, self.nlevels): self.encoder[level] = clayers.blockconv(self.nn_nodes[0][level], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, activation2=self.act2, ndims=ndims, name=chname + "BlockDown_level" + str(level), rdrop=float(self.msettings["dropout"])) self.decoder[level - 1] = clayers.blockconv(self.nn_nodes[1][level], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, activation2=self.act2, ndims=ndims, name=chname + "BlockUp_level" + str(level), rdrop=float(self.msettings["dropout"])) self.crop_encoder[level - 1] = clayers.skipconnect( level - 1, self.nlevels, scale_factor=self.scale_factor, padding=self.padding, ndims=ndims, name=chname + "cropping_level" + str(level)) if self.do_outlayer: if ndims == 2: self.final_layer = tf.keras.layers.Conv2D(filters=self.nlabels, kernel_size=(1, 1), padding=self.padding, activation=self.activation, name=chname + 'output_layer') elif ndims == 3: self.final_layer = tf.keras.layers.Conv3D(filters=self.nlabels, kernel_size=(1, 1, 1), padding=self.padding, activation=self.activation, name=chname + 'output_layer') else: logger.error("Method not implemented for {} 
dimensions".format(ndims)) else: self.final_layer = tf.keras.layers.Lambda(lambda x: x, name=chname + 'output_layer') # @tf.function class UNet_MarkerDrop_MarkerExcite(UNet2D): """ MS-ME model """ class MarkerSampling_MarkerExcite(UNet_MarkerDrop_MarkerExcite): """ Alias for UNet_MarkerDrop_MarkerExcite """ pass class UNet3D(UNet): """ Custom 3D U-Net model """
40.231102
137
0.543995
# (c) 2019-2021, Alvaro Gomariz @ ETH Zurich # Computer-assisted Applications in Medicine (CAiM) Group, Prof. Orcun Goksel import numpy as np import tensorflow as tf from cnn import layers_keras as clayers, losses, metrics_keras as cmetrics import logging logger = logging.getLogger(__name__) logger.setLevel(logging.DEBUG) class CustomModel: """ Base class to build other models on top """ optimizer = tf.keras.optimizers.Adam() # optimizer = tf.keras.optimizers.Adam(0.000001) activation = 'linear' nlabels = 0 loss_name = 'crossentropy' loss_weights = None att_minput = ('att_domchcomb', 'att_chcomb', 'att_domchcomb_cust1', 'att_domchcomb_sm', 'att_domchcomb_cust1sm', 'att_domchcomb_cust2') name_metrictrack = "Fscore_labelmean" def __init__(self, msettings, class_weights=None, name='CustomModel', **kwargs): """ Class initialization Parameters ---------- msettings : dict dictionary describing the model settings. Normally loaded from an external .json file class_weights : list or None list of floats indicating the weight for each of the classes name: str name assigned to identify the created model """ # Data settings self.name = name self.msettings = msettings self.msettings.update({ 'activation': self.activation, 'nchannels': len(msettings['channels']), 'nlabels': len(msettings['labels']) + 1 }) self.class_weights = class_weights self.patch_size = msettings['patch_size'] self.patch_size_out = msettings['patch_size_out'] self.nchannels = len(msettings['channels']) self.input_aux = tf.keras.Input(shape=self.patch_size + [self.nchannels], name="input") self.input_shape_aux = self.input_aux.shape # self.input_shape = self.patch_size + [self.nchannels] # Network settings if 'nnNodes' in msettings: self.nn_nodes = np.array(msettings['nnNodes']) else: self.nn_nodes = np.array([[64, 128, 256, 512, 1024], [1024, 512, 256, 128, 64]]) self.do_batch_norm = msettings['batchnorm'] self.padding = msettings['padding'] # Use extra label in segmentation (for bg) extra_labels = 0 if ( 
('dataset_type' in self.msettings) and self.msettings['dataset_type'] in ('detection')) else 1 self.nlabels = len(msettings['labels']) + extra_labels self.scale_factor = msettings['scale_factor'] self.ksize = msettings['kernel_size'] self.nlevels = msettings['nlevels'] # Graph settings self._graph_built = False self._model_built = False # self.run_eagerly = system.eager_mode # if self.run_eagerly: # logger.warning("The model is running in eager mode, so it will be slower") if not (self.activation in ('softmax', 'sigmoid')): self.is_logits = True def get_loss(self): """ Parses the string in self.loss_name to assign the proper loss Returns ------- class:`tf.losses.Loss` loss function for the model """ if self.loss_name == 'dice': return losses.DiceLoss(mode='macro_robust', only_foreground=True) elif self.loss_name == 'crossentropy': if self.class_weights is None: self.class_weights = [1] * self.nlabels logger.warning("class_weights should be declared for weighted cross entropy") return losses.CrossEntropyWeighted(self.class_weights) else: raise Exception("The loss {} has not been implemented".format(self.loss_name)) def compute_output_shape(self, input_shape=None): # super(CustomModel, self).build(self.input_shape_aux) return self.patch_size_out + [self.nlabels] def get_summary_scalars(self, **kwargs): return None def call(self, inputs): logger.warning("Calling wrong method") return def model_fromlayer(self, nlayer, name=None): """ Create a model from a specific layer Parameters ---------- nlayer : int or str layer number or name that can be recognized within the model name str or None name assigned to the newly created model Returns ------- class:`tf.keras.Model` newly created model """ m_aux = self.build_model() if isinstance(nlayer, int): layer = m_aux.layers[nlayer] elif isinstance(nlayer, str): layer = m_aux.get_layer(nlayer) else: raise Exception("The layer type was not recognized") return tf.keras.Model(inputs=self.input_aux, outputs=layer.output, name=name) 
def build_model(self, name=None): """ Created a keras model from the layers stablished in this class Parameters ---------- name str or None name of the model Returns ------- class:`tf.keras.Model` keras model """ name = name or self.name model = tf.keras.Model(self.input_aux, self.call(self.input_aux), name=name) return model def build_metrics(self, do_idclass=False, **kwargs): """ Builds the custom metrics required for the defined mdoel Parameters ---------- do_idclass: bool Indicates if classes are treated separately (True) or aggregated together (False) Returns ------- class:`tf.keras.metrics.Metric` Metrics for the model """ return cmetrics.get_confmetrics(self.nlabels, self.is_logits, do_idclass=do_idclass) def set_metrics(self): return { "train": cmetrics.get_confmetrics(self.nlabels, self.is_logits), "val": cmetrics.get_confmetrics(self.nlabels, self.is_logits), "test": cmetrics.get_confmetrics(self.nlabels, self.is_logits) } def get_vislayers(self, model=None, names=None): """ Defines the layers that will be visualized in tensorboard Parameters ---------- model: class:`tf.keras.Model` or None model from which the visualizations are to be obtained names: list (of str) or None names of the layers to be visualized Returns ------- class:`tf.keras.Model` Keras model that outputs the different defined layers """ if names: layers = [layer.output for layer in model.layers if not isinstance(layer.output, list) and any([n in layer.name for n in names])] else: layers = [layer.output for layer in model.layers if not isinstance(layer.output, list) and len(layer.output.shape) > 3] try: mlayers = tf.keras.Model(inputs=model.input, outputs=layers) except ValueError: logger.warning("Layers could not be obtained for visualization") mlayers = None return mlayers class UNet(CustomModel): """ Custom 2D U-Net model References ---------- .. 
[1] Olaf Ronneberger, Philipp Fischer, Thomas Brox, *U-Net: Convolutional Networks for Biomedical Image Segmentation*, MICCAI 2015 """ def __init__(self, nodes_factor=2, do_outlayer=True, channel_gather=None, chdrop=None, drop_rate=0.5, do_att=None, pos_att=None, nn_nodes=None, ndims=2, unsupervised_settings=None, **kwargs): """ Class initialization Parameters ---------- nodes_factor : int Reduce the number of nodes in every layer by the factor indicated here do_outlayer : bool Employ a last layer that outputs the classes (True) or output logits (False) channel_gather : int or None If int, the channel corresponding to that id will be employed by the network chdrop : str or None If str, defines the type of MarkerSampling employed as defined in `cnn.layers_keras.lchdrop` drop_rate : float If chdrop is True, this indicated the sampling ratio do_att : str or None If str, defines the type of attention employed as defined in `cnn.layers_keras.get_attention_layer` pos_att : list (of strs) or None It indicates the parts of the UNet where attention is employed. If None, it is used in ('encoder', 'bottleneck', 'decoder') nn_nodes : list (of lists (of ints)) or None Defines the number of nodes in every layer of the model. 
kwargs : key, value mappings Other keyword arguments are passed through to class:`CustomModel` """ super().__init__(**kwargs) self.do_outlayer = do_outlayer if nn_nodes is not None: self.nn_nodes = nn_nodes else: self.nn_nodes = self.nn_nodes // nodes_factor if channel_gather is not None: if chdrop: raise Exception("chdrop and channel_gather cannot be used together") self.do_channelgather = True self.channelgather = clayers.ChannelGather(channel_gather) chname = "BE" + str(channel_gather) + "_" else: self.do_channelgather = False chname = "" self.chdrop = chdrop self.chdrop_multi = False self.drop_rate = drop_rate self.ndims = ndims if self.chdrop is not None: self.chdrop_multi = 'chcomb' in self.chdrop self.drop_channels = clayers.lchdrop(self.chdrop, drop_rate) if self.chdrop == 'att_chcomb': self.drop_channels_att = clayers.ChCombAtt(name="ChDrop_auxAtt") self.encoder = [None] * (self.nlevels) self.encoder_skip = [None] * (self.nlevels - 1) self.decoder = [None] * (self.nlevels - 1) self.crop_encoder = [None] * (self.nlevels - 1) self.concat = [tf.keras.layers.Concatenate() for _ in range(self.nlevels - 1)] if ndims == 2: self.down = [ tf.keras.layers.MaxPool2D(self.scale_factor, padding=self.padding, name=chname + "downsample_level" + str(l)) for l in range(self.nlevels - 1)] elif ndims == 3: self.down = [ tf.keras.layers.MaxPool3D(self.scale_factor, padding=self.padding, name=chname + "downsample_level" + str(l)) for l in range(self.nlevels - 1)] else: logger.error("Method not implemented for {} dimensions".format(ndims)) self.up = [clayers.upsampling_custom( nodes=self.nn_nodes[1][l + 1], scale_factor=self.scale_factor, ndims=ndims, name=chname + "upsample_level" + str(l) ) for l in range(self.nlevels - 1)] self.do_att = do_att self.act2 = None if self.do_att == 'att_chcomb_postact' else 'relu' if self.do_att is None: self.pos_att = {k: False for k in ('encoder', 'bottleneck', 'decoder')} self.att_multi = False else: self.pos_att = pos_att or {k: True for k 
in ('encoder', 'bottleneck', 'decoder')} self.att_multi = 'chcomb' in self.do_att fatt = clayers.get_attention_layer(self.do_att) # self.latt = [[ # fatt("SE_" + net + str(nl)) for nl in range(self.nlevels) # ] for laux, net in zip((self.nlevels, self.nlevels - 1), ('e', 'd'))] self.latt_enc = [ fatt("SE_enc_l" + str(nl)) for nl in range(self.nlevels - 1) ] if self.pos_att['encoder'] else None self.latt_dec = [ fatt("SE_dec_l" + str(nl)) for nl in range(self.nlevels - 1) ] if self.pos_att['decoder'] else None self.latt_bottleneck = fatt("SE_bn") if self.pos_att['bottleneck'] else None self.encoder[0] = clayers.blockconv(self.nn_nodes[0][0], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, ndims=ndims, name=chname + "BlockDown_level0", rdrop=float(self.msettings["dropout"])) for level in range(1, self.nlevels): self.encoder[level] = clayers.blockconv(self.nn_nodes[0][level], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, activation2=self.act2, ndims=ndims, name=chname + "BlockDown_level" + str(level), rdrop=float(self.msettings["dropout"])) self.decoder[level - 1] = clayers.blockconv(self.nn_nodes[1][level], do_batchnorm=self.do_batch_norm, ksize=self.ksize, padding=self.padding, activation2=self.act2, ndims=ndims, name=chname + "BlockUp_level" + str(level), rdrop=float(self.msettings["dropout"])) self.crop_encoder[level - 1] = clayers.skipconnect( level - 1, self.nlevels, scale_factor=self.scale_factor, padding=self.padding, ndims=ndims, name=chname + "cropping_level" + str(level)) if self.do_outlayer: if ndims == 2: self.final_layer = tf.keras.layers.Conv2D(filters=self.nlabels, kernel_size=(1, 1), padding=self.padding, activation=self.activation, name=chname + 'output_layer') elif ndims == 3: self.final_layer = tf.keras.layers.Conv3D(filters=self.nlabels, kernel_size=(1, 1, 1), padding=self.padding, activation=self.activation, name=chname + 'output_layer') else: logger.error("Method not implemented for {} 
dimensions".format(ndims)) else: self.final_layer = tf.keras.layers.Lambda(lambda x: x, name=chname + 'output_layer') # @tf.function def call(self, inputs): x = inputs if self.chdrop: x = self.drop_channels(x) redaxis = [1, 2] if self.ndims == 2 else [1, 2, 3] x_chcomb = tf.cast(tf.not_equal(tf.reduce_sum(x, axis=redaxis), 0), tf.float32) if self.chdrop_multi: self.drop_channels_att([x, x_chcomb]) # l_skips = tf.TensorArray(dtype=tf.float32, size=self.nlevels - 1, infer_shape=False) l_skips = [None] * (self.nlevels - 1) if self.do_channelgather: x = self.channelgather(x) x = self.encoder[0](x) for level, (fencoder, bdown) in enumerate(zip(self.encoder[1:], self.down)): if self.pos_att['encoder']: if self.att_multi: xatt_aux = [x, x_chcomb] else: xatt_aux = x x = self.latt_enc[level](xatt_aux) # l_skips = l_skips.write(level, x) l_skips[level] = x x = bdown(x) x = fencoder(x) if self.pos_att['bottleneck']: if self.att_multi: xatt_aux = [x, x_chcomb] else: xatt_aux = x x = self.latt_bottleneck(xatt_aux) max_level = self.nlevels - 1 for level2, (fdecoder, bup, bconcat, skip_crop) in enumerate(zip( self.decoder, self.up, self.concat, reversed(self.crop_encoder))): max_level -= 1 # x_skip = l_skips.read(max_level) x_skip = l_skips[max_level] x = bup(x) x_skip_crop = skip_crop(x_skip) # x_skip_crop = clayers.crop_match(x_skip, x) x = bconcat([x, x_skip_crop]) x = fdecoder(x) if self.pos_att['decoder']: if self.att_multi: xatt_aux = [x, x_chcomb] else: xatt_aux = x x = self.latt_dec[level2](xatt_aux) xout = self.final_layer(x) return xout class UNet2D(UNet): def __init__(self, ndims=2, **kwargs): super(UNet2D, self).__init__(ndims=ndims, **kwargs) class UNet_MarkerDrop_MarkerExcite(UNet2D): """ MS-ME model """ def __init__(self, **kwargs): super(UNet_MarkerDrop_MarkerExcite, self).__init__(do_att="att_chcomb", chdrop="nonorm", **kwargs) class MarkerSampling_MarkerExcite(UNet_MarkerDrop_MarkerExcite): """ Alias for UNet_MarkerDrop_MarkerExcite """ pass class UNet3D(UNet): 
""" Custom 3D U-Net model """ def __init__(self, **kwargs): super(UNet3D, self).__init__(ndims=3, nodes_factor=4, **kwargs) self.name_metrictrack = 'fscore' def get_loss(self): return tf.keras.losses.MeanSquaredError() def build_metrics(self, data_set=None, **kwargs): if data_set == 'train': return [tf.keras.metrics.MeanSquaredError()] else: return [cmetrics.CellMetrics()]
3,025
-2
292
bd11547a58a12b55ae74731078e2652183703ae4
3,559
py
Python
src/euros/en.py
ThbtSprt/euros
750be3d31eab995b162a916234351e47a2dc27fa
[ "MIT" ]
5
2021-06-11T08:32:35.000Z
2021-08-13T15:48:22.000Z
src/euros/en.py
ThbtSprt/euros
750be3d31eab995b162a916234351e47a2dc27fa
[ "MIT" ]
1
2021-07-24T13:47:15.000Z
2021-07-24T17:19:55.000Z
src/euros/en.py
ThbtSprt/euros
750be3d31eab995b162a916234351e47a2dc27fa
[ "MIT" ]
null
null
null
from re import sub chiffres = ['','one','two','three','four','five','six','seven','eight','nine','ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen'] nombres = ['','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety'] def formater(x): """ Function to clean up the string input and convert it into a float One mandatory arg : int, float, Decimal, or str representing numbers """ if type(x)==str: if ',' in x and len(x.split(',')[1])==3: x=x.replace(',','') elif ',' in x: x=x.replace(',','.') x=sub(r'\s*[a-zA-Z]*','',x) x=round(float(x),2) if x<0: return str(0.00) return str(x) def unite(x): """ Converts x into letters when int(x)<1000 One mandatory argument : any string input representing an integer < 1000 """ x = str(x) if len(x)==2: x= '0'+x elif len(x) ==1: x = '00'+x elif len(x) !=3: return '' if int(x[-2:])<20: dizaines = chiffres[int(x[-2:])] elif int(x[-1])==0: dizaines = nombres[int(x[-2])] else: dizaines = nombres[int(x[-2])]+'-'+chiffres[int(x[-1])] if x[0] == '0': return dizaines else: centaines = chiffres[int(x[0])]+' hundred' if dizaines!='': centaines+=' and ' return centaines+dizaines def nombre2lettres(x): """ This function converts an integer into letters. It is called twice by conv() : firstly, for the integer part of the input, and secondly for the decimals if there are some. 
""" x=str(x) if len(x)<=3: total = unite(x) else: milliards, millions, milliers = '','','' sp,sp2,sp3='','','' if unite(x[-3:]) != '':#nécessité d'un espace avant les centaines sp= ', ' if unite(x[-6:-3]) != '' and len(x)>6:#nécessité d'un espace avant les milliers sp2 = ', ' if unite(x[-9:-6]) != '' and len(x)>9:#nécessité d'un espace avant les millions sp3 = ', ' #MILLIERS if x[-6:-3] == '000': milliers = ''+sp+unite(x[-3:]) elif x == '1000': milliers = 'a thousand' else: milliers = unite(x[-6:-3])+' thousand'+sp+unite(x[-3:]) #MILLIONS if len(x)>6: if x[-9:-6] == '000': millions = '' else: millions = unite(x[-9:-6])+' million' #MILLIARDS if len(x)>9: if x[-12:-9] == '000' or len(x)>12: milliards = 'more than trillion' else: milliards = unite(x[-12:-9])+' billion' #TOTAL total=milliards+sp3+millions+sp2+milliers return total def conv(x): """ Principal function of this package : It takes only one argument (int, float or str) and converts it into letters. Example : conv(10) returns 'ten euros' """ x=formater(x) e,c = x.split('.')[0],x.split('.')[1] if len(c)==1: c=c+'0' if int(c)==0: c='' elif int(c)==1: c='one cent' else: c=nombre2lettres(c)+' cents' if int(e)==0: if c=='': return 'zero euro' else: return c elif int(e)==1: e='one euro' else: e=nombre2lettres(e)+' euros' if c=='': return e else: return e+' and '+c if __name__ == '__main__': result=conv(str(input('Enter here the amount in figures :'))) print(result)
27.376923
178
0.503793
from re import sub chiffres = ['','one','two','three','four','five','six','seven','eight','nine','ten','eleven','twelve','thirteen','fourteen','fifteen','sixteen','seventeen','eighteen','nineteen'] nombres = ['','ten','twenty','thirty','forty','fifty','sixty','seventy','eighty','ninety'] def formater(x): """ Function to clean up the string input and convert it into a float One mandatory arg : int, float, Decimal, or str representing numbers """ if type(x)==str: if ',' in x and len(x.split(',')[1])==3: x=x.replace(',','') elif ',' in x: x=x.replace(',','.') x=sub(r'\s*[a-zA-Z]*','',x) x=round(float(x),2) if x<0: return str(0.00) return str(x) def unite(x): """ Converts x into letters when int(x)<1000 One mandatory argument : any string input representing an integer < 1000 """ x = str(x) if len(x)==2: x= '0'+x elif len(x) ==1: x = '00'+x elif len(x) !=3: return '' if int(x[-2:])<20: dizaines = chiffres[int(x[-2:])] elif int(x[-1])==0: dizaines = nombres[int(x[-2])] else: dizaines = nombres[int(x[-2])]+'-'+chiffres[int(x[-1])] if x[0] == '0': return dizaines else: centaines = chiffres[int(x[0])]+' hundred' if dizaines!='': centaines+=' and ' return centaines+dizaines def nombre2lettres(x): """ This function converts an integer into letters. It is called twice by conv() : firstly, for the integer part of the input, and secondly for the decimals if there are some. 
""" x=str(x) if len(x)<=3: total = unite(x) else: milliards, millions, milliers = '','','' sp,sp2,sp3='','','' if unite(x[-3:]) != '':#nécessité d'un espace avant les centaines sp= ', ' if unite(x[-6:-3]) != '' and len(x)>6:#nécessité d'un espace avant les milliers sp2 = ', ' if unite(x[-9:-6]) != '' and len(x)>9:#nécessité d'un espace avant les millions sp3 = ', ' #MILLIERS if x[-6:-3] == '000': milliers = ''+sp+unite(x[-3:]) elif x == '1000': milliers = 'a thousand' else: milliers = unite(x[-6:-3])+' thousand'+sp+unite(x[-3:]) #MILLIONS if len(x)>6: if x[-9:-6] == '000': millions = '' else: millions = unite(x[-9:-6])+' million' #MILLIARDS if len(x)>9: if x[-12:-9] == '000' or len(x)>12: milliards = 'more than trillion' else: milliards = unite(x[-12:-9])+' billion' #TOTAL total=milliards+sp3+millions+sp2+milliers return total def conv(x): """ Principal function of this package : It takes only one argument (int, float or str) and converts it into letters. Example : conv(10) returns 'ten euros' """ x=formater(x) e,c = x.split('.')[0],x.split('.')[1] if len(c)==1: c=c+'0' if int(c)==0: c='' elif int(c)==1: c='one cent' else: c=nombre2lettres(c)+' cents' if int(e)==0: if c=='': return 'zero euro' else: return c elif int(e)==1: e='one euro' else: e=nombre2lettres(e)+' euros' if c=='': return e else: return e+' and '+c if __name__ == '__main__': result=conv(str(input('Enter here the amount in figures :'))) print(result)
0
0
0
17845ad1df35ea63d566e55450a0df8d9184b04b
6,077
py
Python
data/external/repositories_2to3/197834/Kaggle_Avito-2015-master/model2.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/197834/Kaggle_Avito-2015-master/model2.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
null
null
null
data/external/repositories_2to3/197834/Kaggle_Avito-2015-master/model2.py
Keesiu/meta-kaggle
87de739aba2399fd31072ee81b391f9b7a63f540
[ "MIT" ]
1
2019-12-04T08:23:33.000Z
2019-12-04T08:23:33.000Z
''' This script runs the ftrl-proximal model on data from combo.gl and extras.gl. author: David Thaler date: July 2015 ''' import avito2_io from run_model import compute_offset import sframes from ftrl_proximal import ftrl_proximal from hash_features import hash_features from eval import logloss from datetime import datetime from math import log, exp, ceil import os import argparse import numpy as np import pandas as pd import pdb DROP_COLS = ['rowId','SearchID', 'isDS', 'isVal', 'isTest', 'IsClick','ID'] ROUND_COLS = ['HistCTR', 'cat_pos', 'spe_cat','spe_pos','sqe_cat','sqe_pos'] COL_NAMES = None VAL_START = 0 TEST_START = 0 def chunk_iterator(combo, chunk_size = 50000, start=0): ''' Returns a generator that yields chunks of data. combo - the SFrame to draw from. ''' global COL_NAMES names = combo.column_names() names.insert(0, 'rowId') COL_NAMES = names chunk_start = start data_end = combo.shape[0] chunk_end = min(data_end, start + chunk_size) while chunk_start < data_end: print('chunk [%d:%d]' % (chunk_start, chunk_end)) chunk = combo[chunk_start:chunk_end].to_dataframe() chunk_start += chunk_size chunk_end = min(data_end, chunk_end + chunk_size) yield chunk def select_train(chunk_it, all=True): ''' A generator to filter rows from the other generator. The val/test/ds sets are now files. This filters train rows for full-train or all-data-train-val. ''' for df in chunk_it: if all: # all labeled data incl. usual val set out = df[df.isTest == 0] else: # all labeled data except val set out = df[np.logical_and(df.isTest == 0, df.isVal == 0)] if out.shape[0] > 0: yield out def train(data, alpha=0.1, beta=1.0, L1=0.0, L2=0.1, D=2**26): ''' Runs one training pass. 
''' model = ftrl_proximal(alpha, beta, L1, L2, D, False) for df in data: for t in df.itertuples(): x, y = process_line(t, False) f = hash_features(x, D) p = model.predict(f) model.update(f, p, y) return model def prepareSFrame(data): ''' Call this on the SFrames before calling chunk_iterator, but after adding in extra.gl, if necessary. ''' data['SearchQuery'] = data['SearchQuery'].apply(lambda s : s.strip().lower()[:5]) data['ad_sq'] = data.apply(lambda x : x['SearchQuery'] + str(x['AdID']) if x['sqe'] else '_') data['st_pos'] = data['Position'] + 0.1 * data['seenToday'] data['st_cat'] = data['CategoryID'] + 0.1 * data['seenToday'] data['st_spe'] = data['spe'] + 0.1 * data['seenToday'] data['st_sqe'] = data['sqe'] + 0.1 * data['seenToday'] data['st_ad'] = data['AdID'] + 0.1 * data['seenToday'] data['ad_pos'] = data['AdID'] + 0.1 * data['Position'] data['ad_spe'] = data['AdID'] + 0.1 * data['spe'] data['ad_sqe'] = data['AdID'] + 0.1 * data['sqe'] # Dead Code: def extendSFrame(data): ''' For now, call after prepareSFrame, and adding extras2.gl, is needed. Later, we'll merge this in. ''' # ucat == user_clicked_ad_today data['ucat_pos'] = data['user_clicked_ad_today'] + 0.1 * data['Position'] data['ucat_cat'] = data['user_clicked_ad_today'] + 0.1 * data['CategoryID'] data['ucat_spe'] = data['user_clicked_ad_today'] + 0.1 * data['spe'] data['ucat_sqe'] = data['user_clicked_ad_today'] + 0.1 * data['sqe'] # NB: ucat_ad doesn't make sense # uc = user_clicked_today data['uc_pos'] = data['user_clicked_today'] + 0.1 * data['Position'] data['uc_cat'] = data['user_clicked_today'] + 0.1 * data['CategoryID'] data['uc_spe'] = data['user_clicked_today'] + 0.1 * data['spe'] data['uc_sqe'] = data['user_clicked_today'] + 0.1 * data['sqe'] data['uc_ad'] = data['user_clicked_today'] + 0.1 * data['AdID']
30.691919
96
0.611486
''' This script runs the ftrl-proximal model on data from combo.gl and extras.gl. author: David Thaler date: July 2015 ''' import avito2_io from run_model import compute_offset import sframes from ftrl_proximal import ftrl_proximal from hash_features import hash_features from eval import logloss from datetime import datetime from math import log, exp, ceil import os import argparse import numpy as np import pandas as pd import pdb DROP_COLS = ['rowId','SearchID', 'isDS', 'isVal', 'isTest', 'IsClick','ID'] ROUND_COLS = ['HistCTR', 'cat_pos', 'spe_cat','spe_pos','sqe_cat','sqe_pos'] COL_NAMES = None VAL_START = 0 TEST_START = 0 def chunk_iterator(combo, chunk_size = 50000, start=0): ''' Returns a generator that yields chunks of data. combo - the SFrame to draw from. ''' global COL_NAMES names = combo.column_names() names.insert(0, 'rowId') COL_NAMES = names chunk_start = start data_end = combo.shape[0] chunk_end = min(data_end, start + chunk_size) while chunk_start < data_end: print('chunk [%d:%d]' % (chunk_start, chunk_end)) chunk = combo[chunk_start:chunk_end].to_dataframe() chunk_start += chunk_size chunk_end = min(data_end, chunk_end + chunk_size) yield chunk def select_train(chunk_it, all=True): ''' A generator to filter rows from the other generator. The val/test/ds sets are now files. This filters train rows for full-train or all-data-train-val. ''' for df in chunk_it: if all: # all labeled data incl. usual val set out = df[df.isTest == 0] else: # all labeled data except val set out = df[np.logical_and(df.isTest == 0, df.isVal == 0)] if out.shape[0] > 0: yield out def process_line(t, isTest): line = dict(list(zip(COL_NAMES, t))) if isTest: y = line['ID'] else: y = line['IsClick'] for col in DROP_COLS: line.pop(col) for col in ROUND_COLS: line[col] = round(line[col], 1) line.pop('SearchParams') line.pop('dt') return line, y def train(data, alpha=0.1, beta=1.0, L1=0.0, L2=0.1, D=2**26): ''' Runs one training pass. 
''' model = ftrl_proximal(alpha, beta, L1, L2, D, False) for df in data: for t in df.itertuples(): x, y = process_line(t, False) f = hash_features(x, D) p = model.predict(f) model.update(f, p, y) return model def validate(data, model, offset=0.0): loss = 0.0 count = 0 for k, df in enumerate(data): for t in df.itertuples(): count += 1 x, y = process_line(t, False) f = hash_features(x, model.D) dv = model.predict(f, False) dv += offset p = 1.0/(1.0 + exp(-dv)) loss += logloss(p, y) return loss/count def predict(data, model, offset=0.0): out = [] for k, df in enumerate(data): for t in df.itertuples(): x, id = process_line(t, True) f = hash_features(x, model.D) dv = model.predict(f, False) dv += offset p = 1.0/(1.0 + exp(-dv)) out.append((id, p)) return pd.DataFrame(out, columns=['ID','IsClick']) def write_submission(submit_id, preds): submit_name = 'submission%s.csv' % str(submit_id) submit_path = os.path.join(avito2_io.SUBMIT, submit_name) preds.to_csv(submit_path, index=False) def val_run(tr, val, alpha=0.1, beta=1.0, L1=0.0, L2=0.1, D=2**26, offset=-2.7): start = datetime.now() it = chunk_iterator(tr, chunk_size=50000) print('training...') m = train(it, alpha=alpha, beta=beta, L1=L1, L2=L2, D=D) it = chunk_iterator(val, chunk_size=50000) print('evaluating...') loss = validate(it, m, offset) print('loss: %.5f' % loss) print('elapsed time: %s' % (datetime.now() - start)) return m def prepareSFrame(data): ''' Call this on the SFrames before calling chunk_iterator, but after adding in extra.gl, if necessary. 
''' data['SearchQuery'] = data['SearchQuery'].apply(lambda s : s.strip().lower()[:5]) data['ad_sq'] = data.apply(lambda x : x['SearchQuery'] + str(x['AdID']) if x['sqe'] else '_') data['st_pos'] = data['Position'] + 0.1 * data['seenToday'] data['st_cat'] = data['CategoryID'] + 0.1 * data['seenToday'] data['st_spe'] = data['spe'] + 0.1 * data['seenToday'] data['st_sqe'] = data['sqe'] + 0.1 * data['seenToday'] data['st_ad'] = data['AdID'] + 0.1 * data['seenToday'] data['ad_pos'] = data['AdID'] + 0.1 * data['Position'] data['ad_spe'] = data['AdID'] + 0.1 * data['spe'] data['ad_sqe'] = data['AdID'] + 0.1 * data['sqe'] def full_train(): combo = sframes.load('combo.gl') extras = sframes.load('extras.gl') combo.add_columms(extras) prepareSFrame(combo) cit = chunk_iterator(combo) sit = select_train(cit, True) model = train(sit) return model def run_test(model): test = sframes.load('combo_test.gl') prepareSFrame(test) cit = chunk_iterator(test) pred = predict(cit, model) return pred # Dead Code: def extendSFrame(data): ''' For now, call after prepareSFrame, and adding extras2.gl, is needed. Later, we'll merge this in. ''' # ucat == user_clicked_ad_today data['ucat_pos'] = data['user_clicked_ad_today'] + 0.1 * data['Position'] data['ucat_cat'] = data['user_clicked_ad_today'] + 0.1 * data['CategoryID'] data['ucat_spe'] = data['user_clicked_ad_today'] + 0.1 * data['spe'] data['ucat_sqe'] = data['user_clicked_ad_today'] + 0.1 * data['sqe'] # NB: ucat_ad doesn't make sense # uc = user_clicked_today data['uc_pos'] = data['user_clicked_today'] + 0.1 * data['Position'] data['uc_cat'] = data['user_clicked_today'] + 0.1 * data['CategoryID'] data['uc_spe'] = data['user_clicked_today'] + 0.1 * data['spe'] data['uc_sqe'] = data['user_clicked_today'] + 0.1 * data['sqe'] data['uc_ad'] = data['user_clicked_today'] + 0.1 * data['AdID']
2,011
0
183
d295c9c8af4367b5caeee5f1b85613c99b24d519
159
py
Python
python/ql/test/library-tests/modules/__all__/no_all.py
timoles/codeql
2d24387e9e300bf03be35694816b1e76ae88a50c
[ "MIT" ]
4,036
2020-04-29T00:09:57.000Z
2022-03-31T14:16:38.000Z
python/ql/test/library-tests/modules/__all__/no_all.py
timoles/codeql
2d24387e9e300bf03be35694816b1e76ae88a50c
[ "MIT" ]
2,970
2020-04-28T17:24:18.000Z
2022-03-31T22:40:46.000Z
python/ql/test/library-tests/modules/__all__/no_all.py
ScriptBox99/github-codeql
2ecf0d3264db8fb4904b2056964da469372a235c
[ "MIT" ]
794
2020-04-29T00:28:25.000Z
2022-03-30T08:21:46.000Z
foo = "foo" bar = "bar" baz = "baz" # When `__all__` is not defined, names starting with underscore is not imported with `from <module> import *` _qux = "qux"
26.5
109
0.679245
foo = "foo" bar = "bar" baz = "baz" # When `__all__` is not defined, names starting with underscore is not imported with `from <module> import *` _qux = "qux"
0
0
0
1bbc16675a2f5aea15cb82cb075fde95938a9438
7,942
py
Python
deepstreampy/event.py
sapid/deepstreampy-twisted
78025141bb0ac3aadc248d68f9273bf8993fc3d4
[ "MIT" ]
28
2016-06-16T08:25:28.000Z
2022-03-03T06:48:13.000Z
deepstreampy/event.py
sapid/deepstreampy-twisted
78025141bb0ac3aadc248d68f9273bf8993fc3d4
[ "MIT" ]
8
2017-05-23T01:38:09.000Z
2020-03-18T09:21:49.000Z
deepstreampy/event.py
sapid/deepstreampy-twisted
78025141bb0ac3aadc248d68f9273bf8993fc3d4
[ "MIT" ]
11
2017-04-25T20:12:24.000Z
2020-04-21T13:28:32.000Z
"""Deepstream event handling.""" from __future__ import absolute_import, division, print_function, with_statement from __future__ import unicode_literals from deepstreampy.constants import actions from deepstreampy.constants import topic as topic_constants from deepstreampy.constants import event as event_constants from deepstreampy.message import message_parser from deepstreampy.message import message_builder from deepstreampy.utils import Listener from deepstreampy.utils import AckTimeoutRegistry from deepstreampy.utils import ResubscribeNotifier from tornado import concurrent from pyee import EventEmitter class EventHandler(object): """Handles incoming and outgoing messages related to deepstream events. """ def subscribe(self, name, callback): """Subscribe to an event. Adds a callback for both locally emited events as well as events emitted by other clients. Args: name (str): The name of the event. callback (callable): The function to call when an event is received. """ future = None if not self._emitter.listeners(name): self._ack_timeout_registry.add(name, actions.SUBSCRIBE) future = self._connection.send_message(topic_constants.EVENT, actions.SUBSCRIBE, [name]) else: future = concurrent.Future() future.set_result(None) self._emitter.on(name, callback) return future def unsubscribe(self, name, callback): """Unsubscribe from an event. Removes the callback for the specified event, and notifies the server of the change. Args: name (str): The name of the event callback (callable): The callback to remove """ self._emitter.remove_listener(name, callback) if not self._emitter.listeners(name): self._ack_timeout_registry.add(name, actions.UNSUBSCRIBE) return self._connection.send_message(topic_constants.EVENT, actions.UNSUBSCRIBE, [name]) future = concurrent.Future() future.set_result(None) return future def emit(self, name, data): """Emit an event locally, and tell the server to broadcast it. Other connected clients will also receive the event. 
Args: name (str): The name of the event. data: JSON serializable data to send along with the event. """ future = self._connection.send_message( topic_constants.EVENT, actions.EVENT, [name, message_builder.typed(data)]) self._emitter.emit(name, data) return future def listen(self, pattern, callback): """Register as listener for event subscriptions from other clients. Args: pattern (str): Regular expression pattern to match subscriptions to callback (callable): A function that will be called when an event has been initially subscribed to or is no longer subscribed. Expects the following arguments: event_name (str) is_subscribed (bool) response (callable, callable) """ if (pattern in self._listener and not self._listener[pattern].destroy_pending): self._client._on_error(topic_constants.EVENT, event_constants.LISTENER_EXISTS, pattern) future = concurrent.Future() future.set_result(None) return future elif pattern in self._listener: self._listener[pattern].destroy() listener = Listener(topic_constants.EVENT, pattern, callback, self._options, self._client, self._connection) self._listener[pattern] = listener return listener.send_future def unlisten(self, pattern): """Stop listening to the specified pattern. Remove a previously registered listening pattern. The client will no longer be listening for active/inactive subscriptions. Args: pattern: The regular expression pattern to remove """ if pattern not in self._listener: self._client._on_error(topic_constants.ERROR, event_constants.NOT_LISTENING, pattern) future = concurrent.Future() future.set_result(None) return future listener = self._listener[pattern] if not listener.destroy_pending: listener.send_destroy() else: self._ack_timeout_registry.add(pattern, actions.UNLISTEN) listener.destroy() del self._listener[pattern] return listener.send_future
35.297778
80
0.568623
"""Deepstream event handling.""" from __future__ import absolute_import, division, print_function, with_statement from __future__ import unicode_literals from deepstreampy.constants import actions from deepstreampy.constants import topic as topic_constants from deepstreampy.constants import event as event_constants from deepstreampy.message import message_parser from deepstreampy.message import message_builder from deepstreampy.utils import Listener from deepstreampy.utils import AckTimeoutRegistry from deepstreampy.utils import ResubscribeNotifier from tornado import concurrent from pyee import EventEmitter class EventHandler(object): """Handles incoming and outgoing messages related to deepstream events. """ def __init__(self, connection, client, **options): self._options = options self._connection = connection self._client = client self._emitter = EventEmitter() self._listener = {} subscription_timeout = options.get("subscriptionTimeout", 15) self._ack_timeout_registry = AckTimeoutRegistry(client, topic_constants.EVENT, subscription_timeout) self._resubscribe_notifier = ResubscribeNotifier(client, self._resubscribe) def subscribe(self, name, callback): """Subscribe to an event. Adds a callback for both locally emited events as well as events emitted by other clients. Args: name (str): The name of the event. callback (callable): The function to call when an event is received. """ future = None if not self._emitter.listeners(name): self._ack_timeout_registry.add(name, actions.SUBSCRIBE) future = self._connection.send_message(topic_constants.EVENT, actions.SUBSCRIBE, [name]) else: future = concurrent.Future() future.set_result(None) self._emitter.on(name, callback) return future def unsubscribe(self, name, callback): """Unsubscribe from an event. Removes the callback for the specified event, and notifies the server of the change. 
Args: name (str): The name of the event callback (callable): The callback to remove """ self._emitter.remove_listener(name, callback) if not self._emitter.listeners(name): self._ack_timeout_registry.add(name, actions.UNSUBSCRIBE) return self._connection.send_message(topic_constants.EVENT, actions.UNSUBSCRIBE, [name]) future = concurrent.Future() future.set_result(None) return future def emit(self, name, data): """Emit an event locally, and tell the server to broadcast it. Other connected clients will also receive the event. Args: name (str): The name of the event. data: JSON serializable data to send along with the event. """ future = self._connection.send_message( topic_constants.EVENT, actions.EVENT, [name, message_builder.typed(data)]) self._emitter.emit(name, data) return future def listen(self, pattern, callback): """Register as listener for event subscriptions from other clients. Args: pattern (str): Regular expression pattern to match subscriptions to callback (callable): A function that will be called when an event has been initially subscribed to or is no longer subscribed. Expects the following arguments: event_name (str) is_subscribed (bool) response (callable, callable) """ if (pattern in self._listener and not self._listener[pattern].destroy_pending): self._client._on_error(topic_constants.EVENT, event_constants.LISTENER_EXISTS, pattern) future = concurrent.Future() future.set_result(None) return future elif pattern in self._listener: self._listener[pattern].destroy() listener = Listener(topic_constants.EVENT, pattern, callback, self._options, self._client, self._connection) self._listener[pattern] = listener return listener.send_future def unlisten(self, pattern): """Stop listening to the specified pattern. Remove a previously registered listening pattern. The client will no longer be listening for active/inactive subscriptions. 
Args: pattern: The regular expression pattern to remove """ if pattern not in self._listener: self._client._on_error(topic_constants.ERROR, event_constants.NOT_LISTENING, pattern) future = concurrent.Future() future.set_result(None) return future listener = self._listener[pattern] if not listener.destroy_pending: listener.send_destroy() else: self._ack_timeout_registry.add(pattern, actions.UNLISTEN) listener.destroy() del self._listener[pattern] return listener.send_future def handle(self, message): action = message['action'] data = message['data'] if action == actions.ACK: name = message['data'][1] else: name = message['data'][0] if action == actions.EVENT: if data and len(data) == 2: self._emitter.emit( name, message_parser.convert_typed(data[1], self._client)) else: self._emitter.emit(name) return if (action == actions.ACK and data[0] == actions.UNLISTEN and (name in self._listener) and self._listener[name].destroy_pending): self._listener[name].destroy() del self._listener[name] return elif name in self._listener: self._listener[name]._on_message(message) return elif action in (actions.SUBSCRIPTION_FOR_PATTERN_REMOVED, actions.SUBSCRIPTION_HAS_PROVIDER): return if action == actions.ACK: self._ack_timeout_registry.clear(message) return if action == actions.ERROR: if data[0] == event_constants.MESSAGE_DENIED: self._ack_timeout_registry.remove(message['data'][1], message['data'][2]) elif data[0] == event_constants.NOT_SUBSCRIBED: self._ack_timeout_registry.remove(message['data'][1], actions.UNSUBSCRIBE) message['processedError'] = True self._client._on_error(topic_constants.EVENT, data[0], data[1]) return self._client._on_error(topic_constants.EVENT, event_constants.UNSOLICITED_MESSAGE, name) def _resubscribe(self): for event in self._emitter._events: self._connection.send_message(topic_constants.EVENT, actions.SUBSCRIBE, [event])
2,690
0
81
0a692b3cd1f1f666974a59c77dda0ffc0636b6d0
12,132
py
Python
research/cv/Yolact++/train.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
77
2021-10-15T08:32:37.000Z
2022-03-30T13:09:11.000Z
research/cv/Yolact++/train.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
3
2021-10-30T14:44:57.000Z
2022-02-14T06:57:57.000Z
research/cv/Yolact++/train.py
mindspore-ai/models
9127b128e2961fd698977e918861dadfad00a44c
[ "Apache-2.0" ]
24
2021-10-15T08:32:45.000Z
2022-03-24T18:45:20.000Z
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ Train our Model """ import os import argparse import ast from mindspore.communication.management import get_rank, get_group_size # import mindspore.common.dtype as mstype from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore.nn as nn from mindspore.common import set_seed from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, SummaryCollector, Callback from mindspore.common import initializer as init_p from src.loss_monitor import LossMonitor from src.yolact.layers.modules.loss import MultiBoxLoss from src.yolact.yolactpp import Yolact from src.config import yolact_plus_resnet50_config as cfg from src.dataset import data_to_mindrecord_byte_image, create_yolact_dataset from src.lr_schedule import dynamic_lr from src.network_define import WithLossCell set_seed(1) parser = argparse.ArgumentParser(description="Yolact++ training") parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False, help="If set it true, only create " "Mindrecord, default is false.") # Modelarts --run_distribute default is True parser.add_argument("--run_distribute", 
type=ast.literal_eval, default=False, help="Run distribute, default is false.") parser.add_argument("--do_train", type=ast.literal_eval, default=True, help="Do train or not, default is true.") parser.add_argument("--do_eval", type=ast.literal_eval, default=False, help="Do eval or not, default is false.") parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.") parser.add_argument("--pre_trained", type=str, default=None, help="Pretrain file path.") parser.add_argument("--device_id", type=int, default=3, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") parser.add_argument("--rank_id", type=int, default=0, help="Rank id, default is 0.") parser.add_argument("--net_ckpt", type=str, default="/data/yolact/yolact-20_619.ckpt", help="Do") parser.add_argument("--run_platform", type=str, default="Ascend", choices="Ascend", help="run platform, only support Ascend.") parser.add_argument("--distribute", type=ast.literal_eval, default=False, help="Run distribute, default is False.") parser.add_argument("--train_url", type=str, default="obs://xxx", help="ckpt output dir in obs") # Modelarts parser.add_argument("--data_url", type=str, default="obs://xxx", help="mindrecord file path.") # Modelarts parser.add_argument('--is_modelarts', type=str, default="False", help='is train on modelarts') args_opt = parser.parse_args() class TransferCallback(Callback): """Callback""" if __name__ == '__main__': print("Start train for yolact!") if args_opt.run_platform == "Ascend": if args_opt.is_modelarts == "True": import moxing as mox context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, max_call_depth=10000) device_id = int(os.getenv('DEVICE_ID'), 0) if not args_opt.do_eval and args_opt.run_distribute: init() rank = get_rank() device_num = get_group_size() context.set_auto_parallel_context(device_num=device_num, 
parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True) else: rank = 0 device_num = 1 else: context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", max_call_depth=10000, enable_reduce_precision=True) if args_opt.distribute: if os.getenv("DEVICE_ID", "not_set").isdigit(): context.set_context(device_id=int(os.getenv("DEVICE_ID"))) init() device_num = int(os.getenv("DEVICE_NUM")) rank = int(os.getenv("RANK_ID")) rank_size = int(os.getenv("RANK_SIZE")) context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True, device_num=device_num) else: rank = 0 device_num = 1 context.set_context(device_id=int(args_opt.device_id), save_graphs=True) else: raise ValueError("Unsupported platform.") print("Start create dataset!") if args_opt.is_modelarts == "True": ckpt_filename = "resnet50.ckpt" local_data_url = "/cache/mr/" + str(device_id) local_pretrained_url = "/cache/weights/" local_train_url = "/cache/ckpt" mox.file.make_dirs(local_data_url) mox.file.make_dirs(local_train_url) mox.file.make_dirs(local_pretrained_url) local_pretrained_url = local_pretrained_url + "resnet50.ckpt" filename = "yolact.mindrecord0" mox.file.copy_parallel(args_opt.data_url, local_data_url) if args_opt.pre_trained is not None: mox.file.copy(args_opt.pre_trained, local_pretrained_url) local_data_path = os.path.join(local_data_url, filename) else: prefix = "yolact.mindrecord" mindrecord_dir = cfg['mindrecord_dir'] mindrecord_file = os.path.join(mindrecord_dir, prefix + "0") if rank == 0 and not os.path.exists(mindrecord_file): if not os.path.isdir(mindrecord_dir): os.makedirs(mindrecord_dir) if args_opt.dataset == "coco": if os.path.isdir(cfg['coco_root']): print("Create Mindrecord.") data_to_mindrecord_byte_image("coco", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: raise Exception("coco_root not exits.") else: if os.path.isdir(cfg['IMAGE_DIR']) and os.path.exists(cfg['ANNO_PATH']): print("Create Mindrecord.") 
data_to_mindrecord_byte_image("other", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: raise Exception("IMAGE_DIR or ANNO_PATH not exits.") if not args_opt.only_create_dataset: if args_opt.is_modelarts == "True": dataset = create_yolact_dataset(local_data_path, batch_size=cfg['batch_size'], device_num=device_num, rank_id=rank) else: dataset = create_yolact_dataset(mindrecord_file, batch_size=cfg['batch_size'], device_num=device_num, rank_id=rank) num_steps = dataset.get_dataset_size() print("pre epoch step num: ", num_steps) print("Create dataset done!") net = Yolact() net = net.set_train() if args_opt.is_modelarts == "True": ckpt_file_name = "resnet50.ckpt" backbone_path = local_pretrained_url if args_opt.pre_trained is not None: param_dict = load_checkpoint(backbone_path) if cfg['pretrain_epoch_size'] == 0: for item in list(param_dict.keys()): if not item.startswith('backbone'): param_dict.pop(item) load_param_into_net(net, param_dict) init_weights(net) if args_opt.is_modelarts == "False": ckpt_path = args_opt.net_ckpt if ckpt_path != "": param_dict = load_checkpoint(ckpt_path) load_param_into_net(net, param_dict) loss = MultiBoxLoss(num_classes=cfg['num_classes'], pos_threshold=cfg['positive_iou_threshold'], neg_threshold=cfg['negative_iou_threshold'], negpos_ratio=cfg['ohem_negpos_ratio'], batch_size=cfg['batch_size'], num_priors=cfg['num_priors']) net_with_loss = WithLossCell(net, loss) lr = Tensor(dynamic_lr(cfg, start_epoch=0, total_epochs=cfg['epoch_size'], steps_each_epoch=num_steps), mstype.float32) opt = nn.Momentum(params=net.trainable_params(), learning_rate=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['decay'], loss_scale=cfg['loss_scale']) # define model if args_opt.is_modelarts == "True": model = Model(net_with_loss, optimizer=opt, amp_level='O0') else: model = Model(net_with_loss, optimizer=opt, amp_level='O3') print("============== Starting Training ==============") time_cb = 
TimeMonitor(data_size=num_steps) loss_cb = LossMonitor() if args_opt.is_modelarts == "True": summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_freq=100) else: summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_freq=10) cb = [time_cb, loss_cb] if args_opt.is_modelarts == "True": if cfg['save_checkpoint']: ckptconfig = CheckpointConfig(save_checkpoint_steps=cfg['save_checkpoint_epochs'] * num_steps, keep_checkpoint_max=cfg['keep_checkpoint_max']) ckpoint_cb = ModelCheckpoint(prefix='yolact', directory=local_train_url, config=ckptconfig) transferCb = TransferCallback(local_train_url, args_opt.train_url) if device_id == 0: cb += [ckpoint_cb, transferCb] model.train(cfg['epoch_size'], dataset, callbacks=cb, dataset_sink_mode=True) else: if cfg['save_checkpoint']: ckptconfig = CheckpointConfig(save_checkpoint_steps=cfg['save_checkpoint_epochs'] * num_steps, keep_checkpoint_max=cfg['keep_checkpoint_max']) save_checkpoint_path = os.path.join(cfg['save_checkpoint_path'], 'ckpt_' + str(rank) + '/') ckpoint_cb = ModelCheckpoint(prefix='yolact', directory=save_checkpoint_path, config=ckptconfig) cb += [ckpoint_cb] model.train(cfg['epoch_size'], dataset, callbacks=cb, dataset_sink_mode=False) print("============== End Training ==============")
49.518367
119
0.639219
# Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ """ Train our Model """ import os import argparse import ast from mindspore.communication.management import get_rank, get_group_size # import mindspore.common.dtype as mstype from mindspore import context, Tensor from mindspore.communication.management import init from mindspore.train import Model from mindspore.context import ParallelMode from mindspore.train.serialization import load_checkpoint, load_param_into_net import mindspore.nn as nn from mindspore.common import set_seed from mindspore.train.callback import ModelCheckpoint, CheckpointConfig, TimeMonitor, SummaryCollector, Callback from mindspore.common import initializer as init_p from src.loss_monitor import LossMonitor from src.yolact.layers.modules.loss import MultiBoxLoss from src.yolact.yolactpp import Yolact from src.config import yolact_plus_resnet50_config as cfg from src.dataset import data_to_mindrecord_byte_image, create_yolact_dataset from src.lr_schedule import dynamic_lr from src.network_define import WithLossCell set_seed(1) parser = argparse.ArgumentParser(description="Yolact++ training") parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False, help="If set it true, only create " "Mindrecord, default is false.") # Modelarts --run_distribute default is True parser.add_argument("--run_distribute", 
type=ast.literal_eval, default=False, help="Run distribute, default is false.") parser.add_argument("--do_train", type=ast.literal_eval, default=True, help="Do train or not, default is true.") parser.add_argument("--do_eval", type=ast.literal_eval, default=False, help="Do eval or not, default is false.") parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.") parser.add_argument("--pre_trained", type=str, default=None, help="Pretrain file path.") parser.add_argument("--device_id", type=int, default=3, help="Device id, default is 0.") parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.") parser.add_argument("--rank_id", type=int, default=0, help="Rank id, default is 0.") parser.add_argument("--net_ckpt", type=str, default="/data/yolact/yolact-20_619.ckpt", help="Do") parser.add_argument("--run_platform", type=str, default="Ascend", choices="Ascend", help="run platform, only support Ascend.") parser.add_argument("--distribute", type=ast.literal_eval, default=False, help="Run distribute, default is False.") parser.add_argument("--train_url", type=str, default="obs://xxx", help="ckpt output dir in obs") # Modelarts parser.add_argument("--data_url", type=str, default="obs://xxx", help="mindrecord file path.") # Modelarts parser.add_argument('--is_modelarts', type=str, default="False", help='is train on modelarts') args_opt = parser.parse_args() class TransferCallback(Callback): """Callback""" def __init__(self, local_train_path, obs_train_path): super(TransferCallback, self).__init__() self.local_train_path = local_train_path self.obs_train_path = obs_train_path def step_end(self, run_context): cb_params = run_context.original_args() current_epoch = cb_params.cur_epoch_num if current_epoch % 10 == 0 and current_epoch != 0: mox.file.copy_parallel(self.local_train_path, self.obs_train_path) def init_weights(module): for name, cell in module.cells_and_names(): is_conv_layer = isinstance(cell, 
nn.Conv2d) if is_conv_layer and "backbone" not in name: cell.weight.set_data(init_p.initializer('XavierUniform', cell.weight.shape)) if cell.has_bias is True: cell.bias.set_data(init_p.initializer('zeros', cell.bias.shape)) if __name__ == '__main__': print("Start train for yolact!") if args_opt.run_platform == "Ascend": if args_opt.is_modelarts == "True": import moxing as mox context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", device_id=device_id, max_call_depth=10000) device_id = int(os.getenv('DEVICE_ID'), 0) if not args_opt.do_eval and args_opt.run_distribute: init() rank = get_rank() device_num = get_group_size() context.set_auto_parallel_context(device_num=device_num, parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True) else: rank = 0 device_num = 1 else: context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", max_call_depth=10000, enable_reduce_precision=True) if args_opt.distribute: if os.getenv("DEVICE_ID", "not_set").isdigit(): context.set_context(device_id=int(os.getenv("DEVICE_ID"))) init() device_num = int(os.getenv("DEVICE_NUM")) rank = int(os.getenv("RANK_ID")) rank_size = int(os.getenv("RANK_SIZE")) context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True, device_num=device_num) else: rank = 0 device_num = 1 context.set_context(device_id=int(args_opt.device_id), save_graphs=True) else: raise ValueError("Unsupported platform.") print("Start create dataset!") if args_opt.is_modelarts == "True": ckpt_filename = "resnet50.ckpt" local_data_url = "/cache/mr/" + str(device_id) local_pretrained_url = "/cache/weights/" local_train_url = "/cache/ckpt" mox.file.make_dirs(local_data_url) mox.file.make_dirs(local_train_url) mox.file.make_dirs(local_pretrained_url) local_pretrained_url = local_pretrained_url + "resnet50.ckpt" filename = "yolact.mindrecord0" mox.file.copy_parallel(args_opt.data_url, local_data_url) if args_opt.pre_trained is not None: mox.file.copy(args_opt.pre_trained, 
local_pretrained_url) local_data_path = os.path.join(local_data_url, filename) else: prefix = "yolact.mindrecord" mindrecord_dir = cfg['mindrecord_dir'] mindrecord_file = os.path.join(mindrecord_dir, prefix + "0") if rank == 0 and not os.path.exists(mindrecord_file): if not os.path.isdir(mindrecord_dir): os.makedirs(mindrecord_dir) if args_opt.dataset == "coco": if os.path.isdir(cfg['coco_root']): print("Create Mindrecord.") data_to_mindrecord_byte_image("coco", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: raise Exception("coco_root not exits.") else: if os.path.isdir(cfg['IMAGE_DIR']) and os.path.exists(cfg['ANNO_PATH']): print("Create Mindrecord.") data_to_mindrecord_byte_image("other", True, prefix) print("Create Mindrecord Done, at {}".format(mindrecord_dir)) else: raise Exception("IMAGE_DIR or ANNO_PATH not exits.") if not args_opt.only_create_dataset: if args_opt.is_modelarts == "True": dataset = create_yolact_dataset(local_data_path, batch_size=cfg['batch_size'], device_num=device_num, rank_id=rank) else: dataset = create_yolact_dataset(mindrecord_file, batch_size=cfg['batch_size'], device_num=device_num, rank_id=rank) num_steps = dataset.get_dataset_size() print("pre epoch step num: ", num_steps) print("Create dataset done!") net = Yolact() net = net.set_train() if args_opt.is_modelarts == "True": ckpt_file_name = "resnet50.ckpt" backbone_path = local_pretrained_url if args_opt.pre_trained is not None: param_dict = load_checkpoint(backbone_path) if cfg['pretrain_epoch_size'] == 0: for item in list(param_dict.keys()): if not item.startswith('backbone'): param_dict.pop(item) load_param_into_net(net, param_dict) init_weights(net) if args_opt.is_modelarts == "False": ckpt_path = args_opt.net_ckpt if ckpt_path != "": param_dict = load_checkpoint(ckpt_path) load_param_into_net(net, param_dict) loss = MultiBoxLoss(num_classes=cfg['num_classes'], pos_threshold=cfg['positive_iou_threshold'], 
neg_threshold=cfg['negative_iou_threshold'], negpos_ratio=cfg['ohem_negpos_ratio'], batch_size=cfg['batch_size'], num_priors=cfg['num_priors']) net_with_loss = WithLossCell(net, loss) lr = Tensor(dynamic_lr(cfg, start_epoch=0, total_epochs=cfg['epoch_size'], steps_each_epoch=num_steps), mstype.float32) opt = nn.Momentum(params=net.trainable_params(), learning_rate=cfg['lr'], momentum=cfg['momentum'], weight_decay=cfg['decay'], loss_scale=cfg['loss_scale']) # define model if args_opt.is_modelarts == "True": model = Model(net_with_loss, optimizer=opt, amp_level='O0') else: model = Model(net_with_loss, optimizer=opt, amp_level='O3') print("============== Starting Training ==============") time_cb = TimeMonitor(data_size=num_steps) loss_cb = LossMonitor() if args_opt.is_modelarts == "True": summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_freq=100) else: summary_collector = SummaryCollector(summary_dir='./summary_dir', collect_freq=10) cb = [time_cb, loss_cb] if args_opt.is_modelarts == "True": if cfg['save_checkpoint']: ckptconfig = CheckpointConfig(save_checkpoint_steps=cfg['save_checkpoint_epochs'] * num_steps, keep_checkpoint_max=cfg['keep_checkpoint_max']) ckpoint_cb = ModelCheckpoint(prefix='yolact', directory=local_train_url, config=ckptconfig) transferCb = TransferCallback(local_train_url, args_opt.train_url) if device_id == 0: cb += [ckpoint_cb, transferCb] model.train(cfg['epoch_size'], dataset, callbacks=cb, dataset_sink_mode=True) else: if cfg['save_checkpoint']: ckptconfig = CheckpointConfig(save_checkpoint_steps=cfg['save_checkpoint_epochs'] * num_steps, keep_checkpoint_max=cfg['keep_checkpoint_max']) save_checkpoint_path = os.path.join(cfg['save_checkpoint_path'], 'ckpt_' + str(rank) + '/') ckpoint_cb = ModelCheckpoint(prefix='yolact', directory=save_checkpoint_path, config=ckptconfig) cb += [ckpoint_cb] model.train(cfg['epoch_size'], dataset, callbacks=cb, dataset_sink_mode=False) print("============== End Training 
==============")
788
0
76
d9b0d046b1f7c5af9b84530342ad4cf9beb9cbd8
1,481
py
Python
zksync_sdk/types/signatures.py
zksync-sdk/zksync-python
740020b6c6b83548cf6cd2ec1b4af94316a74667
[ "MIT" ]
22
2021-03-05T07:01:05.000Z
2022-03-26T19:15:19.000Z
zksync_sdk/types/signatures.py
zksync-sdk/zksync-python
740020b6c6b83548cf6cd2ec1b4af94316a74667
[ "MIT" ]
23
2021-03-01T06:09:26.000Z
2022-02-17T21:54:44.000Z
zksync_sdk/types/signatures.py
zksync-sdk/zksync-python
740020b6c6b83548cf6cd2ec1b4af94316a74667
[ "MIT" ]
10
2021-03-08T13:43:49.000Z
2021-08-23T16:18:14.000Z
from dataclasses import dataclass from enum import Enum @dataclass @dataclass
25.101695
82
0.617151
from dataclasses import dataclass from enum import Enum class SignatureType(Enum): ethereum_signature = "EthereumSignature" EIP1271_signature = "EIP1271Signature" @dataclass class TxEthSignature: sig_type: SignatureType signature: str @classmethod def from_dict(cls, json: dict): """ Only the difference from __init__ that signature is already in hex format """ obj = cls(sig_type=SignatureType(json["type"]), signature=b"") obj.signature = json["signature"] return obj def __init__(self, sig_type: SignatureType, signature: bytes): self.signature = signature.hex() self.sig_type = sig_type def dict(self): return { "type": self.sig_type.value, "signature": self.signature } @dataclass class TxSignature: public_key: str signature: str @classmethod def from_dict(cls, json: dict): """ Only the difference from __init__ is that values are already in hex format """ obj = cls(public_key=b"", signature=b"") obj.public_key = json["pubKey"] obj.signature = json["signature"] return obj def __init__(self, public_key: bytes, signature: bytes): self.public_key = public_key.hex() self.signature = signature.hex() def dict(self): return { "pubKey": self.public_key, "signature": self.signature }
444
887
67
ac93c64ddf02c37ad59edc10df03a3fbb2005e63
5,860
py
Python
model/Birdview/test.py
MinesNicaicai/large-scale-pointcloud-matching
cfe140f2be1110ed75b6edd27538021e513a31c9
[ "MIT" ]
1
2020-11-21T16:39:51.000Z
2020-11-21T16:39:51.000Z
model/Birdview/test.py
MinesNicaicai/large-scale-pointcloud-matching
cfe140f2be1110ed75b6edd27538021e513a31c9
[ "MIT" ]
null
null
null
model/Birdview/test.py
MinesNicaicai/large-scale-pointcloud-matching
cfe140f2be1110ed75b6edd27538021e513a31c9
[ "MIT" ]
1
2020-12-13T14:51:44.000Z
2020-12-13T14:51:44.000Z
import argparse import os import matplotlib.pyplot as plt import torch import torch.nn as nn from PIL import Image from torchvision.models import resnet18 from dataset import * from netvlad import EmbedNet from netvlad import NetVLAD parser = argparse.ArgumentParser(description='WayzNetVlad') # parser.add_argument('--mode', type=str, default='train', help='Mode', choices=['train', 'validation']) # parser.add_argument('--batch_size', type=int, default=2, help='batch_size') parser.add_argument('--dataset_dir', type=str, default='/media/admini/My_data/0921/dataset_cam4', help='dataset_dir') parser.add_argument('--num_workers', type=int, default=1, help='num_workers') # parser.add_argument('--from_scratch', type=bool, default=True, help='from_scratch') # parser.add_argument('--pretrained_embedding', type=bool, default=True, help='pretrained_embedding') # parser.add_argument('--num_similar_neg', type=int, default=2, help='num_similar_neg') # parser.add_argument('--margin', type=float, default=1.0, help='margin') parser.add_argument('--use_gpu', type=bool, default=True, help='use_gpu') # parser.add_argument('--learning_rate', type=float, default=0.0005, help='learning_rate') # parser.add_argument('--positive_radius', type=float, default=0.3, help='positive_search_radius') # parser.add_argument('--negative_filter_radius', type=float, default=2.0, help='negative_filter_radius') parser.add_argument('--saved_model_path', type=str, default='saved_model', help='saved_model') # parser.add_argument('--epochs', type=int, default=10, help='epochs') parser.add_argument('--load_checkpoints', type=bool, default=True, help='load_checkpoints') parser.add_argument('--num_clusters', type=int, default=64, help='num_clusters') # parser.add_argument('--images_dir', type=str, default='/media/admini/My_data/0921/dataset_cam4/images', # help='images_dir') parser.add_argument('--queried_image', type=str, default='/media/admini/My_data/0921/dataset_cam4/images/001041.png', help='queried_image') 
parser.add_argument('--generate_database', type=bool, default=False, help='generate_database') args = parser.parse_args() if __name__ == '__main__': test()
47.258065
123
0.690785
import argparse import os import matplotlib.pyplot as plt import torch import torch.nn as nn from PIL import Image from torchvision.models import resnet18 from dataset import * from netvlad import EmbedNet from netvlad import NetVLAD parser = argparse.ArgumentParser(description='WayzNetVlad') # parser.add_argument('--mode', type=str, default='train', help='Mode', choices=['train', 'validation']) # parser.add_argument('--batch_size', type=int, default=2, help='batch_size') parser.add_argument('--dataset_dir', type=str, default='/media/admini/My_data/0921/dataset_cam4', help='dataset_dir') parser.add_argument('--num_workers', type=int, default=1, help='num_workers') # parser.add_argument('--from_scratch', type=bool, default=True, help='from_scratch') # parser.add_argument('--pretrained_embedding', type=bool, default=True, help='pretrained_embedding') # parser.add_argument('--num_similar_neg', type=int, default=2, help='num_similar_neg') # parser.add_argument('--margin', type=float, default=1.0, help='margin') parser.add_argument('--use_gpu', type=bool, default=True, help='use_gpu') # parser.add_argument('--learning_rate', type=float, default=0.0005, help='learning_rate') # parser.add_argument('--positive_radius', type=float, default=0.3, help='positive_search_radius') # parser.add_argument('--negative_filter_radius', type=float, default=2.0, help='negative_filter_radius') parser.add_argument('--saved_model_path', type=str, default='saved_model', help='saved_model') # parser.add_argument('--epochs', type=int, default=10, help='epochs') parser.add_argument('--load_checkpoints', type=bool, default=True, help='load_checkpoints') parser.add_argument('--num_clusters', type=int, default=64, help='num_clusters') # parser.add_argument('--images_dir', type=str, default='/media/admini/My_data/0921/dataset_cam4/images', # help='images_dir') parser.add_argument('--queried_image', type=str, default='/media/admini/My_data/0921/dataset_cam4/images/001041.png', help='queried_image') 
parser.add_argument('--generate_database', type=bool, default=False, help='generate_database') args = parser.parse_args() def test(): encoder = resnet18() base_model = nn.Sequential( encoder.conv1, encoder.bn1, encoder.relu, encoder.maxpool, encoder.layer1, encoder.layer2, encoder.layer3, encoder.layer4 ) dim = list(base_model.parameters())[-1].shape[0] # last channels (512) # Define model for embedding net_vlad = NetVLAD(num_clusters=args.num_clusters, dim=dim, alpha=1.0) model = EmbedNet(base_model, net_vlad) # base_model_checkpoint = torch.load(os.path.join(args.saved_model_path, 'base_model.pth.tar'), # map_location=lambda storage, loc: storage) # net_vlad_checkpoint = torch.load(os.path.join(args.saved_model_path, 'net_vlad.pth.tar'), # map_location=lambda storage, loc: storage) # base_model.load_state_dict(base_model_checkpoint) # net_vlad.load_state_dict(net_vlad_checkpoint) model_checkpoint = torch.load(os.path.join(args.saved_model_path, 'model.pth.tar'), map_location=lambda storage, loc: storage) model.load_state_dict(model_checkpoint) print("Loaded model checkpoints from \'{}\'.".format(args.saved_model_path)) # # torch.save(model.state_dict(), os.path.join(args.saved_model_path, 'model.pth.tar')) # # image_filenames = [os.path.join(args.images_dir, x) for x in os.listdir(args.images_dir) if # ImageDatabase.is_image_file(x)] images_info = make_images_info(args.dataset_dir, with_struct_file=False) images_dir = os.path.join(args.dataset_dir, 'images') if args.generate_database: image_database = ImageDatabase(images_info=images_info, images_dir=images_dir, model=model, generate_database=True) image_database.export_database('database_0921_cam4.npy') else: image_database = ImageDatabase(images_info=images_info, images_dir=images_dir, model=model, generate_database=False) image_database.import_database('database_0921_cam4.npy') queried_image_filename = args.queried_image queried_image_filenames = [ queried_image_filename # 
'/media/admini/My_data/0923/dataset_xiaomi/images/1322.png', # '/media/admini/My_data/0923/dataset_xiaomi/images/1433.png', # '/media/admini/My_data/0923/dataset_xiaomi/images/1544.png', # '/media/admini/My_data/0923/dataset_xiaomi/images/1655.png', # '/media/admini/My_data/0923/dataset_xiaomi/images/1766.png', # '/media/admini/My_data/0923/dataset_xiaomi/images/1877.png', ] plotted_images = [] num_results = 5 for queried_image_filename in queried_image_filenames: query_results = image_database.query_image(queried_image_filename, num_results=num_results) # print('query_result: \n{}'.format(query_results)) queried_image = Image.open(queried_image_filename) # result_image = Image.open(os.path.join(images_dir, query_results[0]['image_file'])) result_images = [Image.open(os.path.join(images_dir, result['image_file'])) for result in query_results] plotted_images += [queried_image] + result_images plot_images(plotted_images, num_results+1) def plot_images(images, cols): plt.figure() # plt.subplot(1, 1, 1) # plt.imshow(images[0]) for i in range(0, len(images)): # row = i // cols # col = i % cols plt.subplot(len(images) // cols, cols, i + 1) plt.imshow(images[i]) plt.show() if __name__ == '__main__': test()
3,607
0
46
e0a74c8b5c04dabcccb68f37533b7fb8c88dedad
5,112
py
Python
runtool/tests/test_transformations/test_recurse_config.py
arangatang/gluon-ts-tools
26509b853ddf1039993779f6049eafd4ec434ff7
[ "Apache-2.0" ]
null
null
null
runtool/tests/test_transformations/test_recurse_config.py
arangatang/gluon-ts-tools
26509b853ddf1039993779f6049eafd4ec434ff7
[ "Apache-2.0" ]
null
null
null
runtool/tests/test_transformations/test_recurse_config.py
arangatang/gluon-ts-tools
26509b853ddf1039993779f6049eafd4ec434ff7
[ "Apache-2.0" ]
null
null
null
from runtool.recurse_config import ( recursive_apply, recursive_apply_dict, recursive_apply_list, Versions, ) def transform(node: dict): """ Converts node to a version object if the node has a key "versions" else it multiplies the node by 2 if the node has key "double" """ if "version" in node: return Versions(node["version"]) elif "double" in node: return 2 * node["double"] return node
27.336898
70
0.385563
from runtool.recurse_config import ( recursive_apply, recursive_apply_dict, recursive_apply_list, Versions, ) def transform(node: dict): """ Converts node to a version object if the node has a key "versions" else it multiplies the node by 2 if the node has key "double" """ if "version" in node: return Versions(node["version"]) elif "double" in node: return 2 * node["double"] return node def compare_recursive_apply(node, expected, fn=transform): assert recursive_apply(node, fn) == expected def test_recursive_apply_double_simple(): compare_recursive_apply( node={"double": 1}, expected=2, ) def test_recursive_apply_double_nested(): compare_recursive_apply( node={ "no_double": 2, "double_this": {"double": 2}, }, expected={"no_double": 2, "double_this": 4}, ) def test_recursive_apply_versions(): compare_recursive_apply( node={ "my_list": [ {"hello": "there"}, {"a": {"version": [1, 2]}}, ] }, expected=Versions( [ {"my_list": [{"hello": "there"}, {"a": 1}]}, {"my_list": [{"hello": "there"}, {"a": 2}]}, ] ), ) def test_recursive_apply_trivial(): compare_recursive_apply({}, {}) def test_recursive_apply_merging_versions_simple(): compare_recursive_apply( node=[Versions([1, 2])], expected=Versions([[1], [2]]), fn=lambda x: x, ) def test_recursive_apply_merging_versions_list(): compare_recursive_apply( node=[Versions([1, 2]), Versions([3, 4])], expected=Versions([[1, 3], [1, 4], [2, 3], [2, 4]]), fn=lambda x: x, ) def test_recursive_apply_merging_versions_dict(): compare_recursive_apply( node={"a": Versions([1, 2]), "b": Versions([3, 4])}, expected=Versions( [ {"a": 1, "b": 3}, {"a": 1, "b": 4}, {"a": 2, "b": 3}, {"a": 2, "b": 4}, ] ), fn=lambda x: x, ) def test_recursive_apply_merging_versions_list_in_dict(): compare_recursive_apply( node={ "a": [Versions([1, 2]), Versions([3, 4])], "b": [Versions([5, 6]), Versions([7, 8])], }, expected=Versions( [ {"a": [1, 3], "b": [5, 7]}, {"a": [1, 3], "b": [5, 8]}, {"a": [1, 3], "b": [6, 7]}, {"a": [1, 3], "b": [6, 8]}, {"a": [1, 4], 
"b": [5, 7]}, {"a": [1, 4], "b": [5, 8]}, {"a": [1, 4], "b": [6, 7]}, {"a": [1, 4], "b": [6, 8]}, {"a": [2, 3], "b": [5, 7]}, {"a": [2, 3], "b": [5, 8]}, {"a": [2, 3], "b": [6, 7]}, {"a": [2, 3], "b": [6, 8]}, {"a": [2, 4], "b": [5, 7]}, {"a": [2, 4], "b": [5, 8]}, {"a": [2, 4], "b": [6, 7]}, {"a": [2, 4], "b": [6, 8]}, ] ), fn=lambda x: x, ) def test_recursive_apply_merging_versions_list_in_list(): compare_recursive_apply( node=[ [Versions([1, 2]), Versions([3, 4])], [Versions([5, 6]), Versions([7, 8])], ], expected=Versions( [ [[1, 3], [5, 7]], [[1, 3], [5, 8]], [[1, 3], [6, 7]], [[1, 3], [6, 8]], [[1, 4], [5, 7]], [[1, 4], [5, 8]], [[1, 4], [6, 7]], [[1, 4], [6, 8]], [[2, 3], [5, 7]], [[2, 3], [5, 8]], [[2, 3], [6, 7]], [[2, 3], [6, 8]], [[2, 4], [5, 7]], [[2, 4], [5, 8]], [[2, 4], [6, 7]], [[2, 4], [6, 8]], ] ), fn=lambda x: x, ) def test_recursive_apply_merging_versions_dict_in_dict(): compare_recursive_apply( node={"a": {"b": Versions([1, 2])}, "c": Versions([2, 3])}, expected=Versions( [ {"a": {"b": 1}, "c": 2}, {"a": {"b": 1}, "c": 3}, {"a": {"b": 2}, "c": 2}, {"a": {"b": 2}, "c": 3}, ] ), fn=lambda x: x, ) def test_recursive_apply_merging_versions_with_function(): compare_recursive_apply( node={ "my_list": [ {"hello": "there"}, {"a": {"version": [1, 2]}}, {"b": {"version": [3, 4]}}, ] }, expected=Versions( [ {"my_list": [{"hello": "there"}, {"a": 1}, {"b": 3}]}, {"my_list": [{"hello": "there"}, {"a": 1}, {"b": 4}]}, {"my_list": [{"hello": "there"}, {"a": 2}, {"b": 3}]}, {"my_list": [{"hello": "there"}, {"a": 2}, {"b": 4}]}, ] ), )
4,372
0
276
8b20124b5243f95d96250a4026b4f24c594f8ebb
2,160
py
Python
tube/etl/indexers/interpreter.py
chicagopcdc/tube
547d021fe00aa25995e7e8d2eb2a57f5633395fd
[ "Apache-2.0" ]
null
null
null
tube/etl/indexers/interpreter.py
chicagopcdc/tube
547d021fe00aa25995e7e8d2eb2a57f5633395fd
[ "Apache-2.0" ]
2
2021-03-24T16:04:22.000Z
2021-03-30T12:54:07.000Z
tube/etl/indexers/interpreter.py
chicagopcdc/tube
547d021fe00aa25995e7e8d2eb2a57f5633395fd
[ "Apache-2.0" ]
null
null
null
import yaml from .aggregation.translator import Translator as AggregatorTranslator from tube.etl.indexers.injection.translator import Translator as InjectionTranslator from .base.translator import Translator as BaseTranslator from tube.utils.dd import init_dictionary from tube.etl.outputs.es.writer import Writer
34.83871
84
0.668981
import yaml from .aggregation.translator import Translator as AggregatorTranslator from tube.etl.indexers.injection.translator import Translator as InjectionTranslator from .base.translator import Translator as BaseTranslator from tube.utils.dd import init_dictionary from tube.etl.outputs.es.writer import Writer def create_translators(sc, config): dictionary, model = init_dictionary(config.DICTIONARY_URL) mappings = yaml.load(open(config.MAPPING_FILE), Loader=yaml.SafeLoader) writer = Writer(sc, config) translators = {} for m in mappings["mappings"]: if m["type"] == "aggregator": translator = AggregatorTranslator( sc, config.HDFS_DIR, writer, m, model, dictionary ) elif m["type"] == "collector": translator = InjectionTranslator( sc, config.HDFS_DIR, writer, m, model, dictionary ) else: translator = BaseTranslator(sc, config.HDFS_DIR, writer) translators[translator.parser.doc_type] = translator for translator in list(translators.values()): translator.update_types() return translators def run_transform(translators): need_to_join = {} translator_to_translators = {} for translator in list(translators.values()): df = translator.translate() if df is None: continue translator.save_to_hadoop(df) translator.current_step = 1 if len(translator.parser.joining_nodes) > 0: need_to_join[translator.parser.doc_type] = translator translator_to_translators[translator.parser.doc_type] = [ j.joining_index for j in translator.parser.joining_nodes ] for v in list(need_to_join.values()): df = v.translate_joining_props(translators) v.save_to_hadoop(df) v.current_step += 1 for t in list(translators.values()): df = t.translate_final() t.write(df) def get_index_names(config): stream = open(config.MAPPING_FILE) mappings = yaml.load(stream, Loader=yaml.SafeLoader) return [m["name"] for m in mappings["mappings"]]
1,774
0
69
07ac0d51e2432251bef3d9eac592921ad7a815a7
998
py
Python
5-python-dataviz-homework/plotting_charts_with_matplotlib.py
eduardohl/CEBD-1160-winter-2020-code
ffafffff5100b338b7a4842fc05abd7234732d8e
[ "MIT" ]
1
2020-02-11T04:29:14.000Z
2020-02-11T04:29:14.000Z
5-python-dataviz-homework/plotting_charts_with_matplotlib.py
eduardohl/CEBD-1160-winter-2020-code
ffafffff5100b338b7a4842fc05abd7234732d8e
[ "MIT" ]
null
null
null
5-python-dataviz-homework/plotting_charts_with_matplotlib.py
eduardohl/CEBD-1160-winter-2020-code
ffafffff5100b338b7a4842fc05abd7234732d8e
[ "MIT" ]
5
2020-01-18T21:22:04.000Z
2020-02-27T23:00:07.000Z
# 1 Still using the same DataFrame from the previous exercise insurance.csv plot the chart for charges and save it # as charges_plot.png import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('data/insurance.csv', header=0) plt.plot(df['charges']) plt.title('Charges Plot') plt.ylabel('Charges') plt.savefig('charges_plot.png') # 2 plot the histogram for bmi and save it as bmi_hist.png plt.clf() plt.hist(df['bmi']) plt.title('BMI Histogram') plt.ylabel('BMI') plt.savefig('bmi_hist.png') # 3 plot the scatterplot for age vs charges and save it as age_charge_scatter.png plt.clf() plt.scatter(df['age'], df['charges']) plt.title('Age vs Charges') plt.xlabel('Age') plt.ylabel('Charges') plt.savefig('age_charge_scatter.png') # 4 Re-do the previous items, adding the title, x label and y label for each item. # done above # 5 Think about the exercise 12 from the previous section. Do the plots match what we saw with the correlation function? # Yes, charges are going up with age
32.193548
120
0.749499
# 1 Still using the same DataFrame from the previous exercise insurance.csv plot the chart for charges and save it # as charges_plot.png import pandas as pd import matplotlib.pyplot as plt df = pd.read_csv('data/insurance.csv', header=0) plt.plot(df['charges']) plt.title('Charges Plot') plt.ylabel('Charges') plt.savefig('charges_plot.png') # 2 plot the histogram for bmi and save it as bmi_hist.png plt.clf() plt.hist(df['bmi']) plt.title('BMI Histogram') plt.ylabel('BMI') plt.savefig('bmi_hist.png') # 3 plot the scatterplot for age vs charges and save it as age_charge_scatter.png plt.clf() plt.scatter(df['age'], df['charges']) plt.title('Age vs Charges') plt.xlabel('Age') plt.ylabel('Charges') plt.savefig('age_charge_scatter.png') # 4 Re-do the previous items, adding the title, x label and y label for each item. # done above # 5 Think about the exercise 12 from the previous section. Do the plots match what we saw with the correlation function? # Yes, charges are going up with age
0
0
0
a54c7c2864b2796866b44586d232abc572d83805
36,346
py
Python
bank/database/operate.py
zpf0117b/BankingManagementSystem
7e6c99e827bb0899d2389e8893311a557c336ea9
[ "MIT" ]
3
2020-09-03T12:39:21.000Z
2021-06-16T02:37:54.000Z
bank/database/operate.py
zpf0117b/BankingManagementSystem
7e6c99e827bb0899d2389e8893311a557c336ea9
[ "MIT" ]
4
2021-05-15T11:10:18.000Z
2021-05-15T11:10:26.000Z
bank/database/operate.py
zpf0117b/BankingManagementSystem
7e6c99e827bb0899d2389e8893311a557c336ea9
[ "MIT" ]
null
null
null
import sys from os import path d = path.dirname(__file__) # 获取当前路径 sys.path.append(d) from model import BankClient, BankName, Staff, Charge, Cheque, Loan, Lend, Payments, PossessCheque, Saving, PossessSaving, database from datetime import date, datetime, timedelta from peewee import DatabaseError, DataError, IntegrityError, InterfaceError, InternalError, NotSupportedError, OperationalError, ProgrammingError, fn, Alias from random import randint import pandas as pd import pandas_profiling as pdp # return: msg, info # return msg, info # return msg, info # return msg, brief info
41.349261
313
0.615749
import sys from os import path d = path.dirname(__file__) # 获取当前路径 sys.path.append(d) from model import BankClient, BankName, Staff, Charge, Cheque, Loan, Lend, Payments, PossessCheque, Saving, PossessSaving, database from datetime import date, datetime, timedelta from peewee import DatabaseError, DataError, IntegrityError, InterfaceError, InternalError, NotSupportedError, OperationalError, ProgrammingError, fn, Alias from random import randint import pandas as pd import pandas_profiling as pdp class ClientOperate(object): # return: msg, info def __init__(self, branch_name, handle_client): self.handle_client = handle_client self.branch_name = branch_name def get_client_info(self): try: # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query = BankClient.select().where(BankClient.client_id_number == self.handle_client).get() except Exception as e: msg1 = repr(e) client_info = ['' for i in range(8)] else: client_info = [query.client_name, query.client_id_number, query.client_contact, query.client_address, query.contactor_name, query.relationship, query.contactor_phone, query.contactor_email] msg1 = 'success' finally: pass client_info.extend(['','','']) try: query2 = Charge.select().where((Charge.client_id_number == self.handle_client) & (Charge.branchbankname == self.branch_name)) except Charge.DoesNotExist as e: msg2 = 'success' except Exception as e: msg2 = repr(e) else: for _query2 in query2: if _query2.loan_or_account == 1: client_info[10] = _query2.staff_id_number elif _query2.loan_or_account == 2: # 00: no account or loan ; 2: account; 1: cheque, 2: saving if _query2.cheque_or_saving == 1: client_info[9] = _query2.staff_id_number elif _query2.cheque_or_saving == 2: client_info[8] = _query2.staff_id_number msg2 = 'success' finally: pass msg = 'success' if msg1 == 'success' and msg2 == 
'success' else msg1 + msg2 return msg, client_info def insert_client_info(self, client_name, client_id, client_phone, client_address, contact_name, relationship, contact_phone, contact_email): # necessary info complete and valid -> add # you cannot use self.handle_client if len(client_name) > 20 or '"' in client_name or "'" in client_name or client_id[0:2] != 'ID' or len(client_phone) != 11: msg = '用户信息不合法' elif len(contact_phone) != 11 or '@' not in contact_email or '"' in contact_name or "'" in contact_name: msg = '联系人信息不合法' else: try: client_item = BankClient.create(client_name=client_name, client_id_number=client_id, client_contact=client_phone, client_address=client_address, contactor_name=contact_name, contactor_phone=contact_phone, contactor_email=contact_email, relationship=relationship) except Exception as e: msg = repr(e) else: try: query1 = Staff.select().where(Staff.branch_bank_name == self.branch_name) except Exception as e: client_item.delete_instance() msg = 'no staff, empty bank' + repr(e) else: choice_staff = randint(0, query1.count() - 1) charge_item = Charge.create(branchbankname=self.branch_name, client_id_number=client_id, staff_id_number=query1[choice_staff].staff_id_number, loan_or_account=0, cheque_or_saving=0) charge_item.save() msg = 'success' finally: print('insert_client_info: ' + msg) return msg def change_client_info(self, client_name, client_phone, client_address, contact_name, relationship, contact_phone, contact_email): # not none and valid -> change try: # print(client_address) # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query = BankClient.select().where(BankClient.client_id_number == self.handle_client).get() except BankClient.DoesNotExist as e: msg = 'client id not exist' except Exception as e: msg = repr(e) else: msg = 'success' # just modify 
valid info, not raise exception, check whether saved by yourself if client_name != '' and len(client_name) < 20 and '"' not in client_name and "'" not in client_name: query.client_name = client_name try: query.save() except Exception as e: pass if client_phone != '' and len(client_phone) == 11: query.client_contact = client_phone try: query.save() except Exception as e: pass if client_address != '': query.client_address = client_address try: query.save() except Exception as e: pass if contact_name != '' and '"' not in contact_name and "'" not in contact_name: query.contactor_name = contact_name try: query.save() except Exception as e: pass if relationship != '': query.relationship = relationship try: query.save() except Exception as e: pass if contact_phone != '' and len(contact_phone) == 11: query.contactor_phone = contact_phone try: query.save() except Exception as e: pass if contact_email != '' and '@' in contact_email: query.contactor_email = contact_email try: query.save() except Exception as e: pass finally: print(msg) return msg def delete_client(self): # you can delete -> delete try: query = Charge.select().where((Charge.client_id_number == self.handle_client) & (Charge.branchbankname == self.branch_name)) cursor = database.execute(query) except Exception as e: msg = repr(e) else: print(cursor.description) if cursor.description[0][2] is None: try: BankClient.delete().where(BankClient.client_id_number == self.handle_client).execute() except IntegrityError as e: msg = repr(e) except Exception as e: msg = repr(e) else: msg = 'success' finally: pass else: msg = '客户有账户,不允许删除' finally: print(msg) return msg class AccountOperate(object): # return msg, info def __init__(self, branch_name, handle_account): self.handle_account = handle_account self.branch_name = branch_name def get_account_info(self): # msg, account_info, cheque_or_saving, client_in_account, client_notin_account try: # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, 
Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query1 = Saving.select().where(Saving.account_number == self.handle_account).get() except Saving.DoesNotExist as e: try: # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query2 = Cheque.select().where(Cheque.account_number == self.handle_account).get() except Cheque.DoesNotExist as e: msg = '账户信息读取失败' cheque_or_saving = 0 client_in_account = [['', '']] client_notin_account = [['']] account_info = ['' for i in range(7)] except Exception as e: msg = repr(e) cheque_or_saving = 0 client_in_account = [['', '']] client_notin_account = [['']] account_info = ['' for i in range(7)] else: account_info = [query2.account_number, '支票账户', query2.open_date, query2.remaining_sum, query2.overdraft, '', ''] cheque_or_saving = 1 client_in_account = [] client_notin_account = [] # SELECT PossessCheque ... 
try: query3 = PossessCheque.select().where(PossessCheque.account_number == self.handle_account) except PossessCheque.DoesNotExist as e: msg = '支票账户信息读取失败' cheque_or_saving = 0 client_in_account.append(['', '']) client_notin_account.append(['']) except Exception as e: msg = repr(e) cheque_or_saving = 0 client_in_account.append(['', '']) client_notin_account.append(['']) else: for _client in query3: client_in_account.append([_client.client_id_number, _client.last_visit_cheque_date]) try: query4 = BankClient.select().where(BankClient.client_id_number.not_in([_client.client_id_number for _client in query3])) except BankClient.DoesNotExist as e: msg = 'success' client_notin_account.append(['']) except Exception as e: msg = repr(e) client_notin_account.append(['']) else: for _client2 in query4: client_notin_account.append([_client2.client_id_number, '']) msg = 'success' # SELECT BankClient ... NOT IN except Exception as e: msg = repr(e) cheque_or_saving = 0 client_in_account = [['', '']] client_notin_account = [['']] account_info = ['' for i in range(7)] else: account_info = [query1.account_number, '储蓄账户', query1.open_date, query1.remaining_sum, '', query1.interest_rate, query1.currency] cheque_or_saving = -1 client_in_account = [] client_notin_account = [] # SELECT PossessCheque ... 
try: query3 = PossessSaving.select().where(PossessSaving.account_number == self.handle_account) except PossessSaving.DoesNotExist as e: msg = '储蓄账户信息读取失败' cheque_or_saving = 0 client_in_account.append(['', '']) client_notin_account.append(['']) except Exception as e: msg = repr(e) cheque_or_saving = 0 client_in_account.append(['', '']) client_notin_account.append(['']) else: for _client in query3: client_in_account.append([_client.client_id_number, _client.last_visit_saving_date]) try: query4 = BankClient.select().where(BankClient.client_id_number.not_in([_client.client_id_number for _client in query3])) except BankClient.DoesNotExist as e: msg = 'success' client_notin_account.append(['']) except Exception as e: msg = repr(e) client_notin_account.append(['']) else: for _client in query4: client_notin_account.append([_client.client_id_number, '']) msg = 'success' finally: print(msg) print('test',client_in_account, client_notin_account) return msg, account_info, cheque_or_saving, client_in_account, client_notin_account def insert_account_info(self, remaining_sum, client_in_account, account_number, open_date, cheque_or_saving, interest_rate, currency, overdraft): # necessary info complete and valid -> add # you cannot use self.handle_client # ignore unnecessary info, opendate = visitdate # check: one cheque and one saving for one client in one branch # Cheque/Saving PossessCheque/PossessSaving random: Charge if cheque_or_saving == 'saving': if not str(interest_rate).replace('.','0').isdigit() or not str(remaining_sum).replace('.','0').isdigit(): msg = '储蓄账户信息不合法' else: # Saving try: saving_item = Saving.create(account_number=account_number, branch_bank_name=self.branch_name, currency=currency, interest_rate=interest_rate, open_date=open_date, remaining_sum=remaining_sum) except Exception as e: msg = repr(e) else: # PossessSaving try: for _client in client_in_account: possess_item = PossessSaving.create(account_number=account_number, client_id_number=_client, 
last_visit_saving_date=open_date) except IntegrityError as e: saving_item.delete_instance() msg = '数据插入出错' print(repr(e)) except Exception as e: saving_item.delete_instance() msg = repr(e) else: # Charge try: possess_charge = {} for _client2 in client_in_account: try: query1 = Charge.select().where((Charge.branchbankname == self.branch_name) & (Charge.client_id_number == _client2) & (Charge.loan_or_account == 0)).get() except Exception as e: saving_item.delete_instance() msg = repr(e) else: # previous account charge_item = Charge.create(branchbankname=self.branch_name, client_id_number=_client2, staff_id_number=query1.staff_id_number, loan_or_account=2, cheque_or_saving=2) possess_charge[_client2] = charge_item except IntegrityError as e: for _client3 in possess_charge.keys(): possess_charge[_client3].delete_instance() saving_item.delete_instance() msg = '唯一键检查出错' print(repr(e)) except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() saving_item.delete_instance() msg = repr(e) print(repr(e)) else: try: query3 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query3.assets += float(remaining_sum) query3.save() except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() saving_item.delete_instance() msg = repr(e) print(repr(e)) else: msg = 'success' finally: print('insert_saving_info: ' + msg) return msg else: if not str(overdraft).replace('.','0').isdigit() or not str(remaining_sum).replace('.','0').isdigit(): msg = '支票账户信息不合法' else: # Saving try: cheque_item = Cheque.create(account_number=account_number, branch_bank_name=self.branch_name, overdraft=overdraft, open_date=open_date, remaining_sum=remaining_sum) except Exception as e: msg = repr(e) else: # PossessSaving try: for _client in client_in_account: possess_item = PossessCheque.create(account_number=account_number, client_id_number=_client, last_visit_cheque_date=open_date) except 
IntegrityError as e: cheque_item.delete_instance() msg = '数据插入出错' print(repr(e)) except Exception as e: cheque_item.delete_instance() msg = repr(e) else: # Charge try: possess_charge = {} for _client2 in client_in_account: try: query1 = Charge.select().where((Charge.branchbankname == self.branch_name) & (Charge.client_id_number == _client2) & (Charge.loan_or_account == 0)).get() except Exception as e: cheque_item.delete_instance() msg = repr(e) else: charge_item = Charge.create(branchbankname=self.branch_name, client_id_number=_client2, staff_id_number=query1.staff_id_number, loan_or_account=2, cheque_or_saving=1) possess_charge[_client2] = charge_item except IntegrityError as e: for _client3 in possess_charge.keys(): possess_charge[_client3].delete_instance() cheque_item.delete_instance() msg = '唯一键检查出错' print(repr(e)) except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() cheque_item.delete_instance() msg = repr(e) else: try: query3 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query3.assets += float(remaining_sum) query3.save() except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() cheque_item.delete_instance() msg = repr(e) else: msg = 'success' finally: print('insert_cheque_info: ' + msg) return msg def change_account_info(self, remaining_sum, client_in_account, visit_date, interest_rate, overdraft): # not none and valid -> change # not none and valid -> change print( remaining_sum, client_in_account, visit_date, interest_rate, overdraft) try: # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query1 = Saving.select().where(Saving.account_number == self.handle_account).get() except Saving.DoesNotExist as e: try: # query2 = 
Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query2 = Cheque.select().where(Cheque.account_number == self.handle_account).get() except Exception as e: msg = repr(e) else: msg = 'success' if str(remaining_sum).replace('.', '0').isdigit() and remaining_sum != '': diff = query2.remaining_sum - float(remaining_sum) query2.remaining_sum = float(remaining_sum) try: query2.save() query3 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query3.assets -= float(diff) query3.save() except Exception as e: msg = repr(e) print(repr(e)) else: msg = 'success' if str(overdraft).replace('.','0').isdigit() and overdraft != '': query2.overdraft = float(overdraft) try: query2.save() except Exception as e: msg = repr(e) i = 0 for _client in client_in_account: if _client != '' and visit_date[i] != '': try: query4 = PossessCheque.select().where((PossessCheque.client_id_number == _client) & (PossessCheque.account_number == self.handle_account)).get() except Exception as e: pass else: query4.last_visit_cheque_date = visit_date[i] query4.save() i = i + 1 except Exception as e: msg = repr(e) else: msg = 'success' # just modify valid info, not raise exception, check whether saved by yourself if str(remaining_sum).replace('.', '0').isdigit() and remaining_sum != '': diff = query1.remaining_sum - float(remaining_sum) query1.remaining_sum = float(remaining_sum) try: query1.save() query3 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query3.assets -= float(diff) query3.save() except Exception as e: msg = repr(e) print(repr(e)) else: msg = 'success' if str(overdraft).replace('.','0').isdigit() and overdraft != '': query1.overdraft = float(overdraft) try: query1.save() except Exception as e: msg = repr(e) i = 0 for _client in client_in_account: if _client != '' and 
visit_date[i] != '': try: query4 = PossessSaving.select().where((PossessSaving.client_id_number == _client) & (PossessSaving.account_number == self.handle_account)).get() except Exception as e: pass else: query4.last_visit_saving_date = visit_date[i] query4.save() i = i + 1 finally: print(msg) return msg def delete_account(self): # you can delete -> delete try: query1 = Saving.select().where(Saving.account_number == self.handle_account).get() # cursor1 = database.execute(query1) except Saving.DoesNotExist as e: try: query2 = Cheque.select().where(Cheque.account_number == self.handle_account).get() # cursor2 = database.execute(query2) except Exception as e: msg = repr(e) else: try: # delete cheque these_client = PossessCheque.select().where(PossessCheque.account_number == self.handle_account) cursor4 = database.execute(these_client) try: query4 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query4.assets -= float(query2.remaining_sum) query4.save() except Exception as e: msg = repr(e) else: Cheque.delete().where(Cheque.account_number == self.handle_account).execute() except Exception as e: msg = repr(e) print(msg) else: try: # if no cheque of this client exists, delete from charge for (account_number, client_id_number, last_visit_cheque_date) in cursor4: print(account_number, client_id_number, last_visit_cheque_date) try: query3 = PossessCheque.select().where((PossessCheque.account_number != self.handle_account) & (PossessCheque.client_id_number == client_id_number)).get() except PossessCheque.DoesNotExist as e: try: q = Charge.delete().where((Charge.client_id_number == client_id_number) & (Charge.branchbankname == self.branch_name) & (Charge.cheque_or_saving == 1)).execute() except Exception as e: msg = repr(e) except Exception as e: msg = repr(e) else: pass # query = Charge.select().where() # cursor = database.execute(query) except Exception as e: msg = repr(e) else: msg = 'success' except Exception as e: msg = repr(e) else: try: # 
delete saving these_client = PossessSaving.select().where(PossessSaving.account_number == self.handle_account) cursor4 = database.execute(these_client) try: query4 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query4.assets -= float(query1.remaining_sum) query4.save() except Exception as e: msg = repr(e) else: Saving.delete().where(Saving.account_number == self.handle_account).execute() except Exception as e: msg = repr(e) else: try: # if no cheque of this client exists, delete from charge for (account_number, client_id_number, last_visit_saving_date) in cursor4: print(account_number, client_id_number, last_visit_saving_date) try: query3 = PossessSaving.select().where((PossessSaving.account_number != self.handle_account) & (PossessSaving.client_id_number == client_id_number)).get() except PossessSaving.DoesNotExist as e: try: q = Charge.delete().where((Charge.client_id_number == client_id_number) & (Charge.branchbankname == self.branch_name) & (Charge.cheque_or_saving == 2)).execute() except Exception as e: msg = repr(e) except Exception as e: msg = repr(e) else: pass # query = Charge.select().where() # cursor = database.execute(query) except Exception as e: msg = repr(e) else: msg = 'success' finally: print(msg) return msg class LoanOperate(object): # return msg, info def __init__(self, branch_name, handle_loan): self.handle_loan = handle_loan self.branch_name = branch_name def get_loan_info(self): # return: msg, loan_info, client_in_loan, loan_payments try: # query2 = Charge.select(BankClient.client_id_number, BankClient.client_name, Charge.staff_id_number).join(BankClient, # on=(Charge.client_id_number == BankClient.client_id_number)).where(Charge.branchbankname == self.branch_name) query2 = (Payments.select().where(Payments.loan_number == self.handle_loan)) query = Loan.select().where(Loan.loan_number == self.handle_loan).get() except Exception as e: msg = repr(e) loan_info = ['' for i in range(3)] client_in_loan = [] 
loan_payments = ['', '', ''] else: print(query,query2[:]) loan_info = [query.loan_number, query2.count(), query.issue_overall_amount] print(loan_info) msg = 'success' loan_payments = [] for row in query2: loan_payments.append([row.pay_date, row.pay_amount, 'NO' if row.pay_date.__ge__(date.today()) else 'YES']) client_in_loan = [] try: query3 = (Lend.select().where(Lend.loan_number == self.handle_loan)) except Exception as e: msg = repr(e) else: for row in query3: client_in_loan.append(row.client_id_number) finally: print(msg) return msg, loan_info, client_in_loan, loan_payments def insert_loan_info(self, loan_number, overall_issue_amount, overall_issue_time, add_loan_client, payments_date, payments_amount): # necessary info complete and valid -> add # you cannot use self.handle_client # ignore unnecessary info, opendate = visitdate # check: one cheque and one saving for one client in one branch # loan lend payments charge if not overall_issue_time.isdigit(): msg = '贷款信息不符1' else: overall_issue_time = int(overall_issue_time) if overall_issue_time < 1 or not str(overall_issue_amount).replace('.', '0').isdigit() or len(payments_amount) != overall_issue_time or sum([float(payments_amount[i]) for i in range(overall_issue_time)]) != float(overall_issue_amount) or not all([str(x).replace('.', '0').isdigit() for x in payments_amount]): print(overall_issue_time, str(overall_issue_amount).replace('.', '0').isdigit()) print(len(payments_amount) != overall_issue_time) print(sum([float(payments_amount[i]) for i in range(overall_issue_time)]) != float(overall_issue_amount) ) print(all([str(x).replace('.', '0').isdigit() for x in payments_amount])) msg = '贷款信息不符' else: issue_now_amount = sum([float(payments_amount[i]) if datetime.date(datetime.strptime(payments_date[i], '%Y-%m-%d')).__le__(date.today()) else 0 for i in range(overall_issue_time)]) try: loan_item = Loan.create(branch_bank_name=self.branch_name, issue_now_amount=issue_now_amount, 
issue_overall_amount=float(overall_issue_amount), loan_number=loan_number) except Exception as e: msg = repr(e) else: # lend try: for _client in add_loan_client: lend_item = Lend.create(loan_number=loan_number, client_id_number=_client) except IntegrityError as e: loan_item.delete_instance() msg = '数据插入出错' print(repr(e)) except Exception as e: loan_item.delete_instance() msg = repr(e) else: # payments try: for i in range(overall_issue_time): payments_item = Payments.create(loan_number=loan_number,pay_amount=float(payments_amount[i]),pay_date=payments_date[i]) except Exception as e: loan_item.delete_instance() msg = repr(e) else: try: possess_charge = {} for _client2 in add_loan_client: try: query1 = Charge.select().where((Charge.branchbankname == self.branch_name) & (Charge.client_id_number == _client2) & (Charge.loan_or_account == 0)).get() except Exception as e: loan_item.delete_instance() msg = repr(e) else: # previous account charge_item = Charge.create(branchbankname=self.branch_name, client_id_number=_client2, staff_id_number=query1.staff_id_number, loan_or_account=1, cheque_or_saving=3) possess_charge[_client2] = charge_item except IntegrityError as e: for _client3 in possess_charge.keys(): possess_charge[_client3].delete_instance() loan_item.delete_instance() msg = '唯一键检查出错' print(repr(e)) except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() loan_item.delete_instance() msg = repr(e) print(repr(e)) else: try: query3 = BankName.select().where(BankName.branch_bank_name == self.branch_name).get() query3.assets -= issue_now_amount query3.save() except Exception as e: for _client4 in possess_charge.keys(): possess_charge[_client4].delete_instance() loan_item.delete_instance() msg = repr(e) print(repr(e)) else: msg = 'success' finally: print('insert_saving_info: ' + msg) print(msg) return msg def delete_loan(self): # you can delete -> delete try: these_client = Lend.select().where(Lend.loan_number == 
self.handle_loan) cursor4 = database.execute(these_client) this_loan = Loan.select().where(Loan.loan_number == self.handle_loan).get() except Exception as e: msg = repr(e) else: if this_loan.issue_now_amount == this_loan.issue_overall_amount: try: Loan.delete().where(Loan.loan_number == self.handle_loan).execute() except Exception as e: msg = repr(e) else: msg = 'success' for (client_id_number, loan_number) in cursor4: print(client_id_number, loan_number) # try: # query3 = Lend.select().where((Lend.loan_number != self.handle_loan) & (Lend.client_id_number == client_id_number)).get() # except Lend.DoesNotExist as e: try: q = Charge.delete().where((Charge.client_id_number == client_id_number) & (Charge.branchbankname == self.branch_name) & (Charge.loan_or_account == 1)).execute() except Exception as e: msg = repr(e) # except Exception as e: # msg = repr(e) # else: # pass else: msg = '不允许删除' finally: print(msg) return msg class Overall(object): # return msg, brief info def __init__(self, branch_name): self.branch_name = branch_name def get_overall_bank(self): banks = [] try: query = BankName.select() except Exception as e: msg = repr(e) else: for branch in query: banks.append([branch.branch_bank_name, branch.bank_city, branch.assets]) msg = 'success' finally: print(msg) return msg, banks def get_overall_client(self): client = [] try: query = BankClient.select() except Exception as e: msg = repr(e) else: for _client in query: client.append([_client.client_name, _client.client_id_number, _client.contactor_phone]) msg = 'success' finally: print(msg) return msg, client def get_overall_account(self): account = [] try: query1 = Cheque.select() query2 = Saving.select() except Exception as e: msg = repr(e) else: for chequeA in query1: account.append([chequeA.branch_bank_name, chequeA.account_number, '支票']) for savingA in query2: account.append([savingA.branch_bank_name, savingA.account_number, '储蓄']) msg = 'success' finally: print(msg) return msg, account def 
get_overall_loan(self): loan = [] try: query = Loan.select() except Exception as e: msg = repr(e) else: for _loan in query: loan.append(['未开始发放' if _loan.issue_now_amount == 0 else ('发放中' if _loan.issue_now_amount < _loan.issue_overall_amount else '已全部发放'), _loan.loan_number, _loan.branch_bank_name]) msg = 'success' finally: print(msg) return msg, loan def generate_dateframe(start_time, branch_name, sta_select): end_time = date.today() timetrial = {'M':pd.Series(pd.period_range(start=(datetime.date(datetime.strptime(start_time, '%Y-%m-%d'))-timedelta(days=31)), freq='M', periods=12)), 'Q': pd.Series(pd.period_range(start=(datetime.date(datetime.strptime(start_time, '%Y-%m-%d'))-timedelta(days=91)), freq='Q', periods=12)), 'Y':pd.Series(pd.period_range(start=(datetime.date(datetime.strptime(start_time, '%Y-%m-%d'))-timedelta(days=365)), freq='Y', periods=4))} # a = .iloc[i].strftime('%Y-%m-%d') # b = datetime.date(datetime.strptime(a, '%Y-%m-%d')) # select between index = timetrial[sta_select] msg = 'success' saving_amount = [] cheque_amount = [] loan_amount = [] overall_amount = [] business = [] for i in range(len(index)-1): time_select = (datetime.date(datetime.strptime(index.iloc[i].strftime('%Y-%m-%d'), '%Y-%m-%d')), datetime.date(datetime.strptime(index.iloc[i + 1].strftime('%Y-%m-%d'), '%Y-%m-%d'))) new_cheque = [0, 0] new_saving = [0, 0] new_loan = [0, 0] try: query3 = Payments.select().where((Payments.pay_date >= time_select[0]) & (Payments.pay_date < time_select[1])) except Payments.DoesNotExist as e: msg = 'success' except Exception as e: msg = repr(e) else: for _loan in query3: new_loan[0] += 1 new_loan[1] -= _loan.pay_amount # business.append(new_cheque[0]+new_saving[0]+new_loan[0]) try: query1 = Cheque.select().where((Cheque.open_date >= time_select[0]) & (Cheque.open_date < time_select[1])) except Cheque.DoesNotExist as e: msg = 'success' except Exception as e: msg = repr(e) else: for _cheque in query1: new_cheque[0] += 1 new_cheque[1] += 
_cheque.remaining_sum try: query2 = Saving.select().where((Saving.open_date >= time_select[0]) & (Saving.open_date < time_select[1])) except Saving.DoesNotExist as e: msg = 'success' except Exception as e: msg = repr(e) else: for _saving in query2: new_saving[0] += 1 new_saving[1] += _saving.remaining_sum saving_amount.append(new_saving[1]) cheque_amount.append(new_cheque[1]) loan_amount.append(new_loan[1]) overall_amount.append(new_cheque[1] + new_loan[1] + new_saving[1]) business.append(new_cheque[0] + new_loan[0] + new_saving[0]) rep = pd.DataFrame({'PERIOD':index.head(len(index)-1),'SAVING': saving_amount, 'CHEQUE': cheque_amount, 'LOAN': loan_amount, 'ASSETS': overall_amount, 'BUSINESS': business}, index=index.head(len(index)-1)) print(rep) pdp.ProfileReport(rep).to_file(output_file="./templates/statistic.html") return msg
35,451
21
586
581d89ac51782d24c0741d4f545d4bc1a7f38aa1
297
py
Python
tests/settings_test.py
jamesperes/autonomiaBot
bbc90497d29fa16a85477281f87ceaeb7c3e55fc
[ "MIT" ]
8
2018-03-22T21:46:30.000Z
2021-04-07T07:06:05.000Z
tests/settings_test.py
jamesperes/autonomiaBot
bbc90497d29fa16a85477281f87ceaeb7c3e55fc
[ "MIT" ]
42
2018-03-23T17:20:38.000Z
2021-06-02T01:19:51.000Z
tests/settings_test.py
jamesperes/autonomiaBot
bbc90497d29fa16a85477281f87ceaeb7c3e55fc
[ "MIT" ]
3
2018-03-26T20:30:53.000Z
2020-01-18T13:52:32.000Z
# application configuration from autonomia.settings import * # noqa DEBUG = True TESTING = True LOG_LEVEL = "DEBUG" WEBHOOK_DOMAIN = "localhost:5000" WEBHOOK_PATH = "hook" CHAT_ID = -123456 # Third party configuration API_TOKEN = "133505823:AAHZFMHno3mzVLErU5b5jJvaeG--qUyLyG0" REDIS_URL = ""
19.8
59
0.767677
# application configuration from autonomia.settings import * # noqa DEBUG = True TESTING = True LOG_LEVEL = "DEBUG" WEBHOOK_DOMAIN = "localhost:5000" WEBHOOK_PATH = "hook" CHAT_ID = -123456 # Third party configuration API_TOKEN = "133505823:AAHZFMHno3mzVLErU5b5jJvaeG--qUyLyG0" REDIS_URL = ""
0
0
0
af56ccce5a7ef323f67d85aaae8849e66a22a7f8
27,895
py
Python
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path_/candidate_secondary_paths/candidate_secondary_path/state/__init__.py
ckishimo/napalm-yang
8f2bd907bd3afcde3c2f8e985192de74748baf6c
[ "Apache-2.0" ]
64
2016-10-20T15:47:18.000Z
2021-11-11T11:57:32.000Z
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path_/candidate_secondary_paths/candidate_secondary_path/state/__init__.py
ckishimo/napalm-yang
8f2bd907bd3afcde3c2f8e985192de74748baf6c
[ "Apache-2.0" ]
126
2016-10-05T10:36:14.000Z
2019-05-15T08:43:23.000Z
napalm_yang/models/openconfig/network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path_/candidate_secondary_paths/candidate_secondary_path/state/__init__.py
ckishimo/napalm-yang
8f2bd907bd3afcde3c2f8e985192de74748baf6c
[ "Apache-2.0" ]
63
2016-11-07T15:23:08.000Z
2021-09-22T14:41:16.000Z
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths/candidate-secondary-path/state. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Operational state parameters relating to the candidate secondary path """ __slots__ = ( "_path_helper", "_extmethods", "__secondary_path", "__priority", "__active" ) _yang_name = "state" _pybind_generated_by = "container" def _get_secondary_path(self): """ Getter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ return self.__secondary_path def _set_secondary_path(self, v, load=False): """ Setter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_secondary_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_secondary_path() directly. 
YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """secondary_path must be of a type compatible with leafref""", "defined-type": "leafref", "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""", } ) self.__secondary_path = t if hasattr(self, "_set"): self._set() def _get_priority(self): """ Getter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) YANG Description: The priority of the specified secondary path option. 
Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ return self.__priority def _set_priority(self, v, load=False): """ Setter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. YANG Description: The priority of the specified secondary path option. Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _get_active(self): """ Getter method for active, 
mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ return self.__active def _set_active(self, v, load=False): """ Setter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_active is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active() directly. YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """active must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""", } ) self.__active = t if hasattr(self, "_set"): self._set() secondary_path = __builtin__.property(_get_secondary_path) priority = 
__builtin__.property(_get_priority) active = __builtin__.property(_get_active) _pyangbind_elements = OrderedDict( [("secondary_path", secondary_path), ("priority", priority), ("active", active)] ) class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths/candidate-secondary-path/state. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Operational state parameters relating to the candidate secondary path """ __slots__ = ( "_path_helper", "_extmethods", "__secondary_path", "__priority", "__active" ) _yang_name = "state" _pybind_generated_by = "container" def _get_secondary_path(self): """ Getter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ return self.__secondary_path def _set_secondary_path(self, v, load=False): """ Setter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_secondary_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_secondary_path() directly. 
YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """secondary_path must be of a type compatible with leafref""", "defined-type": "leafref", "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""", } ) self.__secondary_path = t if hasattr(self, "_set"): self._set() def _get_priority(self): """ Getter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) YANG Description: The priority of the specified secondary path option. 
Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ return self.__priority def _set_priority(self, v, load=False): """ Setter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. YANG Description: The priority of the specified secondary path option. Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _get_active(self): """ Getter method for active, 
mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ return self.__active def _set_active(self, v, load=False): """ Setter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_active is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active() directly. YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """active must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""", } ) self.__active = t if hasattr(self, "_set"): self._set() secondary_path = __builtin__.property(_get_secondary_path) priority = 
__builtin__.property(_get_priority) active = __builtin__.property(_get_active) _pyangbind_elements = OrderedDict( [("secondary_path", secondary_path), ("priority", priority), ("active", active)] )
42.587786
423
0.624628
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths/candidate-secondary-path/state. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: Operational state parameters relating to the candidate secondary path """ __slots__ = ( "_path_helper", "_extmethods", "__secondary_path", "__priority", "__active" ) _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__secondary_path = YANGDynClass( base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) self.__priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) self.__active = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return 
self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "mpls", "lsps", "constrained-path", "tunnels", "tunnel", "p2p-tunnel-attributes", "p2p-primary-path", "p2p-primary-path", "candidate-secondary-paths", "candidate-secondary-path", "state", ] def _get_secondary_path(self): """ Getter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ return self.__secondary_path def _set_secondary_path(self, v, load=False): """ Setter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_secondary_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_secondary_path() directly. 
YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """secondary_path must be of a type compatible with leafref""", "defined-type": "leafref", "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""", } ) self.__secondary_path = t if hasattr(self, "_set"): self._set() def _unset_secondary_path(self): self.__secondary_path = YANGDynClass( base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) def _get_priority(self): """ Getter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) YANG Description: The priority of the specified secondary path option. 
Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ return self.__priority def _set_priority(self, v, load=False): """ Setter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. YANG Description: The priority of the specified secondary path option. Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _unset_priority(self): self.__priority = 
YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) def _get_active(self): """ Getter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ return self.__active def _set_active(self, v, load=False): """ Setter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_active is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active() directly. 
YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """active must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""", } ) self.__active = t if hasattr(self, "_set"): self._set() def _unset_active(self): self.__active = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) secondary_path = __builtin__.property(_get_secondary_path) priority = __builtin__.property(_get_priority) active = __builtin__.property(_get_active) _pyangbind_elements = OrderedDict( [("secondary_path", secondary_path), ("priority", priority), ("active", active)] ) class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/mpls/lsps/constrained-path/tunnels/tunnel/p2p-tunnel-attributes/p2p-primary-path/p2p-primary-path/candidate-secondary-paths/candidate-secondary-path/state. 
Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Operational state parameters relating to the candidate secondary path """ __slots__ = ( "_path_helper", "_extmethods", "__secondary_path", "__priority", "__active" ) _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__secondary_path = YANGDynClass( base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) self.__priority = YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) self.__active = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: 
setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "mpls", "lsps", "constrained-path", "tunnels", "tunnel", "p2p-tunnel-attributes", "p2p-primary-path", "p2p-primary-path", "candidate-secondary-paths", "candidate-secondary-path", "state", ] def _get_secondary_path(self): """ Getter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ return self.__secondary_path def _set_secondary_path(self, v, load=False): """ Setter method for secondary_path, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/secondary_path (leafref) If this variable is read-only (config: false) in the source YANG file, then _set_secondary_path is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_secondary_path() directly. 
YANG Description: A reference to the secondary path that should be utilised when the containing primary path option is in use """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """secondary_path must be of a type compatible with leafref""", "defined-type": "leafref", "generated-type": """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='leafref', is_config=False)""", } ) self.__secondary_path = t if hasattr(self, "_set"): self._set() def _unset_secondary_path(self): self.__secondary_path = YANGDynClass( base=six.text_type, is_leaf=True, yang_name="secondary-path", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="leafref", is_config=False, ) def _get_priority(self): """ Getter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) YANG Description: The priority of the specified secondary path option. 
Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ return self.__priority def _set_priority(self, v, load=False): """ Setter method for priority, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/priority (uint16) If this variable is read-only (config: false) in the source YANG file, then _set_priority is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_priority() directly. YANG Description: The priority of the specified secondary path option. Higher priority options are less preferable - such that a secondary path reference with a priority of 0 is the most preferred """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """priority must be of a type compatible with uint16""", "defined-type": "uint16", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..65535']},int_size=16), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint16', is_config=False)""", } ) self.__priority = t if hasattr(self, "_set"): self._set() def _unset_priority(self): self.__priority = 
YANGDynClass( base=RestrictedClassType( base_type=int, restriction_dict={"range": ["0..65535"]}, int_size=16 ), is_leaf=True, yang_name="priority", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint16", is_config=False, ) def _get_active(self): """ Getter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ return self.__active def _set_active(self, v, load=False): """ Setter method for active, mapped from YANG variable /network_instances/network_instance/mpls/lsps/constrained_path/tunnels/tunnel/p2p_tunnel_attributes/p2p_primary_path/p2p_primary_path/candidate_secondary_paths/candidate_secondary_path/state/active (boolean) If this variable is read-only (config: false) in the source YANG file, then _set_active is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_active() directly. 
YANG Description: Indicates the current active path option that has been selected of the candidate secondary paths """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """active must be of a type compatible with boolean""", "defined-type": "boolean", "generated-type": """YANGDynClass(base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""", } ) self.__active = t if hasattr(self, "_set"): self._set() def _unset_active(self): self.__active = YANGDynClass( base=YANGBool, is_leaf=True, yang_name="active", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="boolean", is_config=False, ) secondary_path = __builtin__.property(_get_secondary_path) priority = __builtin__.property(_get_priority) active = __builtin__.property(_get_active) _pyangbind_elements = OrderedDict( [("secondary_path", secondary_path), ("priority", priority), ("active", active)] )
9,200
0
270
89fc6f1de5be83c46722245878ac20f1102c60e8
555
py
Python
socialite/preprocess/get_max_degree.py
Wangqge/PowerLog_ae
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
[ "Apache-2.0" ]
null
null
null
socialite/preprocess/get_max_degree.py
Wangqge/PowerLog_ae
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
[ "Apache-2.0" ]
null
null
null
socialite/preprocess/get_max_degree.py
Wangqge/PowerLog_ae
8546afbcb9a77d516e8c3f0dfbaf2041a4b888f9
[ "Apache-2.0" ]
null
null
null
import sys if len(sys.argv) != 2: print 'usage: [edge pair input path]' exit(1) with open(sys.argv[1], 'r') as fi: node_dict = {} for line in fi: tmp = line.split() src = int(tmp[0]) if src not in node_dict: node_dict[src] = 1 else: node_dict[src] += 1 max_degree = 0 max_node = 0 for node in node_dict: if node_dict[node] > max_node: max_degree = node_dict[node] max_node = node print max_node print 'degree ' + str(max_degree)
23.125
41
0.538739
import sys if len(sys.argv) != 2: print 'usage: [edge pair input path]' exit(1) with open(sys.argv[1], 'r') as fi: node_dict = {} for line in fi: tmp = line.split() src = int(tmp[0]) if src not in node_dict: node_dict[src] = 1 else: node_dict[src] += 1 max_degree = 0 max_node = 0 for node in node_dict: if node_dict[node] > max_node: max_degree = node_dict[node] max_node = node print max_node print 'degree ' + str(max_degree)
0
0
0
1658ee280f3440ab4466219888be9de4ead0bb43
2,159
py
Python
derivativetest.py
syangliu/Newton-MR-grad
bf17cac404f0d0c92d5852e265162cc82fe3fefd
[ "Apache-2.0" ]
1
2022-03-31T05:15:31.000Z
2022-03-31T05:15:31.000Z
derivativetest.py
syangliu/Newton-MR
bf17cac404f0d0c92d5852e265162cc82fe3fefd
[ "Apache-2.0" ]
null
null
null
derivativetest.py
syangliu/Newton-MR
bf17cac404f0d0c92d5852e265162cc82fe3fefd
[ "Apache-2.0" ]
1
2022-03-20T00:11:03.000Z
2022-03-20T00:11:03.000Z
import numpy as np import numpy.random as rand import matplotlib.pyplot as plt from numpy.linalg import norm def derivativetest(fun, x0): """ Test the gradient and Hessian of a function. A large proportion parallel in the middle of both plots means accuraccy. INPUTS: fun: a function handle that gives f, g, Hv x0: starting point OUTPUTS: derivative test plots """ x0 = x0.reshape(len(x0),1) fun0 = fun(x0) dx = rand.randn(len(x0),1) M = 20; dxs = np.zeros((M,1)) firsterror = np.zeros((M,1)) order1 = np.zeros((M-1,1)) seconderror = np.zeros((M,1)) order2 = np.zeros((M-1,1)) for i in range(M): x = x0 + dx fun1 = fun(x) H0 = Ax(fun0[2],dx) firsterror[i] = abs(fun1[0] - (fun0[0] + np.dot( dx.T, fun0[1])))/abs(fun0[0]) seconderror[i] = abs(fun1[0] - (fun0[0] + np.dot( dx.T, fun0[1]) + 0.5* np.dot(dx.T, H0)))/abs(fun0[0]) print('First Order Error is %8.2e; Second Order Error is %8.2e'% ( firsterror[i], seconderror[i])) if i > 0: order1[i-1] = np.log2(firsterror[i-1]/firsterror[i]) order2[i-1] = np.log2(seconderror[i-1]/seconderror[i]) dxs[i] = norm(dx) dx = dx/2 step = [2**(-i-1) for i in range(M)] plt.figure(figsize=(12,8)) plt.subplot(221) plt.loglog(step, abs(firsterror),'b', label = '1st Order Err') plt.loglog(step, dxs**2,'r', label = 'order') plt.gca().invert_xaxis() plt.legend() plt.subplot(222) plt.semilogx(step[1:], order1,'b', label = '1st Order') plt.gca().invert_xaxis() plt.legend() plt.subplot(223) plt.loglog(step, abs(seconderror),'b', label = '2nd Order Err') plt.loglog(step, dxs**3,'r', label = 'Order') plt.gca().invert_xaxis() plt.legend() plt.subplot(224) plt.semilogx(step[1:], order2,'b', label = '2nd Order') plt.gca().invert_xaxis() plt.legend() return plt.show()
29.175676
76
0.548865
import numpy as np import numpy.random as rand import matplotlib.pyplot as plt from numpy.linalg import norm def derivativetest(fun, x0): """ Test the gradient and Hessian of a function. A large proportion parallel in the middle of both plots means accuraccy. INPUTS: fun: a function handle that gives f, g, Hv x0: starting point OUTPUTS: derivative test plots """ x0 = x0.reshape(len(x0),1) fun0 = fun(x0) dx = rand.randn(len(x0),1) M = 20; dxs = np.zeros((M,1)) firsterror = np.zeros((M,1)) order1 = np.zeros((M-1,1)) seconderror = np.zeros((M,1)) order2 = np.zeros((M-1,1)) for i in range(M): x = x0 + dx fun1 = fun(x) H0 = Ax(fun0[2],dx) firsterror[i] = abs(fun1[0] - (fun0[0] + np.dot( dx.T, fun0[1])))/abs(fun0[0]) seconderror[i] = abs(fun1[0] - (fun0[0] + np.dot( dx.T, fun0[1]) + 0.5* np.dot(dx.T, H0)))/abs(fun0[0]) print('First Order Error is %8.2e; Second Order Error is %8.2e'% ( firsterror[i], seconderror[i])) if i > 0: order1[i-1] = np.log2(firsterror[i-1]/firsterror[i]) order2[i-1] = np.log2(seconderror[i-1]/seconderror[i]) dxs[i] = norm(dx) dx = dx/2 step = [2**(-i-1) for i in range(M)] plt.figure(figsize=(12,8)) plt.subplot(221) plt.loglog(step, abs(firsterror),'b', label = '1st Order Err') plt.loglog(step, dxs**2,'r', label = 'order') plt.gca().invert_xaxis() plt.legend() plt.subplot(222) plt.semilogx(step[1:], order1,'b', label = '1st Order') plt.gca().invert_xaxis() plt.legend() plt.subplot(223) plt.loglog(step, abs(seconderror),'b', label = '2nd Order Err') plt.loglog(step, dxs**3,'r', label = 'Order') plt.gca().invert_xaxis() plt.legend() plt.subplot(224) plt.semilogx(step[1:], order2,'b', label = '2nd Order') plt.gca().invert_xaxis() plt.legend() return plt.show() def Ax(A, x): if callable(A): Ax = A(x) else: Ax =A.dot(x) return Ax
75
0
23
dc248dbf9ee291025d20284dbe3dde3da67d21fa
6,222
py
Python
dt_stripe/migrations/0001_initial.py
itsnamgyu/api-demo
ddf726928bd7f1021143c4dbb530e3017a3edda9
[ "MIT" ]
1
2019-06-02T08:20:38.000Z
2019-06-02T08:20:38.000Z
dt_stripe/migrations/0001_initial.py
itsnamgyu/api-demo
ddf726928bd7f1021143c4dbb530e3017a3edda9
[ "MIT" ]
12
2019-07-21T18:40:35.000Z
2019-10-05T19:57:49.000Z
dt_stripe/migrations/0001_initial.py
itsnamgyu/django-template
20f64974e0dda69cf8dcf0dac9e0a309f200fb61
[ "MIT" ]
null
null
null
# Generated by Django 2.2.4 on 2019-09-23 23:32 from django.db import migrations, models import django.db.models.deletion
69.133333
272
0.624558
# Generated by Django 2.2.4 on 2019-09-23 23:32 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ] operations = [ migrations.CreateModel( name='Customer', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe Customer', max_length=128, verbose_name='id')), ('name', models.CharField(blank=True, max_length=128, null=True, verbose_name='name')), ('email', models.EmailField(blank=True, max_length=254, null=True, verbose_name='email')), ('description', models.TextField(blank=True, help_text='Description for admins', null=True, verbose_name='description')), ('source_status', models.CharField(choices=[('NA', 'None'), ('FA', 'Source payment failed'), ('AV', 'Source available')], default='NA', max_length=2, verbose_name='source status')), ('default_source_id', models.CharField(max_length=128, null=True, verbose_name='default source id')), ('default_source_brand', models.TextField(null=True, verbose_name='default source brand')), ('default_source_last4', models.CharField(max_length=4, null=True, verbose_name='default source last 4 digits')), ], ), migrations.CreateModel( name='Plan', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe Plan', max_length=128, verbose_name='id')), ('amount', models.IntegerField(verbose_name='amount')), ('currency', models.CharField(default='usd', max_length=3, verbose_name='currency')), ('interval', models.CharField(choices=[('day', 'Daily'), ('week', 'Weekly'), ('month', 'Monthly'), ('year', 'Yearly')], max_length=16, verbose_name='interval')), ('interval_count', models.IntegerField(default=1, help_text='Number of intervals per billing cycle. 
I.e., interval=month and interval_count=3 for 3 months.', verbose_name='count')), ('name', models.CharField(help_text='Local name for use by admins', max_length=128, null=True, verbose_name='name')), ('description', models.TextField(blank=True, help_text='Local description for use by admins', null=True, verbose_name='description')), ], ), migrations.CreateModel( name='Product', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe Product', max_length=128, verbose_name='id')), ('name', models.CharField(max_length=128, verbose_name='name')), ('description', models.TextField(blank=True, help_text="Description to be shown to the user. Only for products of type 'good'.", null=True, verbose_name='description')), ('product_type', models.CharField(choices=[('good', 'Good'), ('service', 'Service')], max_length=16, verbose_name='type')), ], ), migrations.CreateModel( name='Subscription', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe Subscription', max_length=128, verbose_name='id')), ('status', models.CharField(choices=[('NA', 'No subscription'), ('AV', 'Active'), ('CE', 'Active until end of billing cycle)'), ('CX', 'Canceled due to expiry'), ('CP', 'Canceled due to payment issue')], default='NA', max_length=2, verbose_name='status')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='dt_stripe.Customer')), ('plan', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='subscriptions', to='dt_stripe.Plan')), ], ), migrations.CreateModel( name='SKU', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe SKU', 
max_length=128, verbose_name='id')), ('price', models.IntegerField(verbose_name='price')), ('currency', models.CharField(default='usd', max_length=3, verbose_name='currency')), ('name', models.CharField(help_text='Local name for use by admins', max_length=128, null=True, verbose_name='name')), ('description', models.TextField(blank=True, help_text='Local description for use by admins', null=True, verbose_name='description')), ('product', models.ForeignKey(on_delete='CASCADE', related_name='skus', to='dt_stripe.Product')), ], ), migrations.AddField( model_name='plan', name='product', field=models.ForeignKey(on_delete='CASCADE', related_name='plans', to='dt_stripe.Product'), ), migrations.CreateModel( name='Order', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('stripe_id', models.CharField(db_index=True, help_text='Id of Stripe Order', max_length=128, verbose_name='id')), ('date_created', models.DateTimeField(auto_now_add=True, verbose_name='date created')), ('customer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='dt_stripe.Customer')), ('sku', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='orders', to='dt_stripe.SKU')), ], ), ]
0
6,075
23
ae20cf10f08092916ececf243f8198d8ffd52c36
2,915
py
Python
tests/sett/test_upgrade_crv_setts.py
shuklaayush/badger-system
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
[ "MIT" ]
99
2020-12-02T08:40:48.000Z
2022-03-15T05:21:06.000Z
tests/sett/test_upgrade_crv_setts.py
shuklaayush/badger-system
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
[ "MIT" ]
115
2020-12-15T07:15:39.000Z
2022-03-28T22:21:03.000Z
tests/sett/test_upgrade_crv_setts.py
shuklaayush/badger-system
1274eadbd0b0f3a02efbf40702719ce1d0a96c44
[ "MIT" ]
56
2020-12-11T06:50:04.000Z
2022-02-21T09:17:38.000Z
import pytest from brownie import Contract, chain, interface from rich.console import Console from helpers.sett.SnapshotManager import SnapshotManager from helpers.sett.simulation.SimulationManager import SimulationManager from helpers.registry import registry from helpers.registry.artifacts import artifacts from helpers.time_utils import days from helpers.token_utils import distribute_from_whales from scripts.upgrade.upgrade_crv_setts import ( queue_upgrade_crv_strat, CRV_SETTS_TO_UPGRADE, ) from scripts.systems.badger_system import connect_badger from tests.sett.generic_strategy_tests.strategy_flow import ( assert_deposit_withdraw_single_user_flow, assert_single_user_harvest_flow, assert_migrate_single_user, assert_withdraw_other, assert_single_user_harvest_flow_remove_fees, ) from tests.sett.generic_strategy_tests.strategy_permissions import ( assert_strategy_action_permissions, assert_strategy_config_permissions, assert_strategy_pausing_permissions, assert_sett_pausing_permissions, assert_sett_config_permissions, assert_controller_permissions, ) from tests.conftest import badger_single_sett from config.badger_config import badger_config console = Console() @pytest.mark.parametrize( "settID", CRV_SETTS_TO_UPGRADE, )
32.388889
71
0.797942
import pytest from brownie import Contract, chain, interface from rich.console import Console from helpers.sett.SnapshotManager import SnapshotManager from helpers.sett.simulation.SimulationManager import SimulationManager from helpers.registry import registry from helpers.registry.artifacts import artifacts from helpers.time_utils import days from helpers.token_utils import distribute_from_whales from scripts.upgrade.upgrade_crv_setts import ( queue_upgrade_crv_strat, CRV_SETTS_TO_UPGRADE, ) from scripts.systems.badger_system import connect_badger from tests.sett.generic_strategy_tests.strategy_flow import ( assert_deposit_withdraw_single_user_flow, assert_single_user_harvest_flow, assert_migrate_single_user, assert_withdraw_other, assert_single_user_harvest_flow_remove_fees, ) from tests.sett.generic_strategy_tests.strategy_permissions import ( assert_strategy_action_permissions, assert_strategy_config_permissions, assert_strategy_pausing_permissions, assert_sett_pausing_permissions, assert_sett_config_permissions, assert_controller_permissions, ) from tests.conftest import badger_single_sett from config.badger_config import badger_config console = Console() @pytest.mark.parametrize( "settID", CRV_SETTS_TO_UPGRADE, ) def test_simulation_after_upgrade_crv_setts(settID): # Upgrade crv strategy badger = connect_badger(badger_config.prod_json) """ TODO Get the Implementation before upgrade """ txFilename = queue_upgrade_crv_strat(badger, settID) # Sleep 2 days to pass timelock delay period. 
chain.sleep(2 * days(2)) badger.governance_execute_transaction(txFilename) """ TODO assert tht implementation has changed """ ## Object representing the sett we want and the mode we're in thisSettConfig = {"id": settID, "mode": "test"} ## Get badger so we can get info in sett and strats badger = badger_single_sett(thisSettConfig) ## We now have the want, we can mint some deployer = badger.deployer ## Mints token for us distribute_from_whales(deployer) snap = SnapshotManager(badger, settID) simulation = SimulationManager(badger, snap, settID) simulation.provision() # Randomize 30 actions. simulation.randomize(30) simulation.run() assert_deposit_withdraw_single_user_flow(thisSettConfig) assert_single_user_harvest_flow(thisSettConfig) assert_migrate_single_user(thisSettConfig) assert_withdraw_other(thisSettConfig) assert_single_user_harvest_flow_remove_fees(thisSettConfig) assert_strategy_action_permissions(thisSettConfig) assert_strategy_config_permissions(thisSettConfig) assert_strategy_pausing_permissions(thisSettConfig) assert_sett_pausing_permissions(thisSettConfig) assert_sett_config_permissions(thisSettConfig) assert_controller_permissions(thisSettConfig)
1,590
0
22
7dff39fe5f92e51a87e2ac41d42efa20ffd5a3e0
4,215
py
Python
wideq/refrigerator.py
davewatson91/wideq
3746447a1321d4a03e715c110bd8b150256cf6d1
[ "MIT" ]
2
2021-03-25T22:22:23.000Z
2021-04-20T05:44:26.000Z
wideq/refrigerator.py
davewatson91/wideq
3746447a1321d4a03e715c110bd8b150256cf6d1
[ "MIT" ]
1
2021-04-20T05:43:32.000Z
2021-04-20T05:46:56.000Z
wideq/refrigerator.py
davewatson91/wideq
3746447a1321d4a03e715c110bd8b150256cf6d1
[ "MIT" ]
3
2021-04-20T05:45:25.000Z
2021-06-04T23:57:17.000Z
import enum from typing import Optional from .client import Device from .util import lookup_enum class RefrigeratorDevice(Device): """A higher-level interface for a refrigerator.""" def set_temp_refrigerator_c(self, temp): """Set the refrigerator temperature in Celsius. """ value = self.model.enum_value('TempRefrigerator', str(temp)) self._set_control('RETM', value) def set_temp_freezer_c(self, temp): """Set the freezer temperature in Celsius. """ value = self.model.enum_value('TempFreezer', str(temp)) self._set_control('REFT', value) def poll(self) -> Optional['RefrigeratorStatus']: """Poll the device's current state. Monitoring must be started first with `monitor_start`. :returns: Either a `RefrigeratorStatus` instance or `None` if the status is not yet available. """ # Abort if monitoring has not started yet. if not hasattr(self, 'mon'): return None data = self.mon.poll() if data: res = self.model.decode_monitor(data) return RefrigeratorStatus(self, res) else: return None class RefrigeratorStatus(object): """Higher-level information about a refrigerator's current status. :param refrigerator: The RefrigeratorDevice instance. :param data: JSON data from the API. """ @property @property @property @property @property @property @property @property @property @property @property @property
29.893617
76
0.671886
import enum from typing import Optional from .client import Device from .util import lookup_enum class IcePlus(enum.Enum): OFF = "@CP_OFF_EN_W" ON = "@CP_ON_EN_W" ICE_PLUS = "@RE_TERM_ICE_PLUS_W" ICE_PLUS_FREEZE = "@RE_MAIN_SPEED_FREEZE_TERM_W" ICE_PLUS_OFF = "@CP_TERM_OFF_KO_W" class FreshAirFilter(enum.Enum): OFF = "@CP_TERM_OFF_KO_W" AUTO = "@RE_STATE_FRESH_AIR_FILTER_MODE_AUTO_W" POWER = "@RE_STATE_FRESH_AIR_FILTER_MODE_POWER_W" REPLACE_FILTER = "@RE_STATE_REPLACE_FILTER_W" SMARTCARE_ON = "@RE_STATE_SMART_SMART_CARE_ON" SMARTCARE_OFF = "@RE_STATE_SMART_SMART_CARE_OFF" SMARTCARE_WAIT = "@RE_STATE_SMART_SMART_CARE_WAIT" EMPTY = "" class SmartSavingMode(enum.Enum): OFF = "@CP_TERM_USE_NOT_W" NIGHT = "@RE_SMARTSAVING_MODE_NIGHT_W" CUSTOM = "@RE_SMARTSAVING_MODE_CUSTOM_W" SMART_GRID_OFF = "@CP_OFF_EN_W" SMART_GRID_DEMAND_RESPONSE = "@RE_TERM_DEMAND_RESPONSE_FUNCTIONALITY_W" SMART_GRID_CUSTOM = "@RE_TERM_DELAY_DEFROST_CAPABILITY_W" EMPTY = "" class RefrigeratorDevice(Device): """A higher-level interface for a refrigerator.""" def set_temp_refrigerator_c(self, temp): """Set the refrigerator temperature in Celsius. """ value = self.model.enum_value('TempRefrigerator', str(temp)) self._set_control('RETM', value) def set_temp_freezer_c(self, temp): """Set the freezer temperature in Celsius. """ value = self.model.enum_value('TempFreezer', str(temp)) self._set_control('REFT', value) def poll(self) -> Optional['RefrigeratorStatus']: """Poll the device's current state. Monitoring must be started first with `monitor_start`. :returns: Either a `RefrigeratorStatus` instance or `None` if the status is not yet available. """ # Abort if monitoring has not started yet. if not hasattr(self, 'mon'): return None data = self.mon.poll() if data: res = self.model.decode_monitor(data) return RefrigeratorStatus(self, res) else: return None class RefrigeratorStatus(object): """Higher-level information about a refrigerator's current status. 
:param refrigerator: The RefrigeratorDevice instance. :param data: JSON data from the API. """ def __init__(self, refrigerator: RefrigeratorDevice, data: dict): self.refrigerator = refrigerator self.data = data @property def temp_refrigerator_c(self): temp = lookup_enum('TempRefrigerator', self.data, self.refrigerator) return int(temp) @property def temp_freezer_c(self): temp = lookup_enum('TempFreezer', self.data, self.refrigerator) return int(temp) @property def ice_plus_status(self): status = lookup_enum('IcePlus', self.data, self.refrigerator) return IcePlus(status) @property def fresh_air_filter_status(self): status = lookup_enum('FreshAirFilter', self.data, self.refrigerator) return FreshAirFilter(status) @property def energy_saving_mode(self): mode = lookup_enum('SmartSavingMode', self.data, self.refrigerator) return SmartSavingMode(mode) @property def door_opened(self): state = lookup_enum('DoorOpenState', self.data, self.refrigerator) return state == "OPEN" @property def temp_unit(self): return lookup_enum('TempUnit', self.data, self.refrigerator) @property def energy_saving_enabled(self): mode = lookup_enum( 'SmartSavingModeStatus', self.data, self.refrigerator ) return mode == 'ON' @property def locked(self): status = lookup_enum('LockingStatus', self.data, self.refrigerator) return status == "LOCK" @property def active_saving_status(self): return self.data['ActiveSavingStatus'] @property def eco_enabled(self): eco = lookup_enum('EcoFriendly', self.data, self.refrigerator) return eco == "@CP_ON_EN_W" @property def water_filter_used_month(self): return self.data['WaterFilterUsedMonth']
1,326
872
408
75331b011bd2fbc98c3034f1d5e7ec1e561ea62b
780
py
Python
ursinanetworking/examples/QTChat/server.py
nutanstrek/UrsinaNetworking
f505f073f9a25f9f79fa0a1c059aeed6661377e0
[ "MIT" ]
38
2021-04-06T05:07:24.000Z
2022-03-29T01:44:10.000Z
ursinanetworking/examples/QTChat/server.py
nutanstrek/UrsinaNetworking
f505f073f9a25f9f79fa0a1c059aeed6661377e0
[ "MIT" ]
3
2021-04-11T02:24:19.000Z
2022-03-29T03:34:19.000Z
ursinanetworking/examples/QTChat/server.py
nutanstrek/UrsinaNetworking
f505f073f9a25f9f79fa0a1c059aeed6661377e0
[ "MIT" ]
6
2021-04-06T12:01:25.000Z
2022-03-28T20:12:10.000Z
from ursinanetworking import * Server = UrsinaNetworkingServer("localhost", 25565) @Server.event @Server.event while True: Server.process_net_events()
30
85
0.694872
from ursinanetworking import * Server = UrsinaNetworkingServer("localhost", 25565) def broadcast_message(message, author): Server.broadcast("receive_message", {"message" : message, "author" : author}) @Server.event def onClientConnected(client): client.registered = False #broadcast_message(f"{client} connected", "system") @Server.event def send_message(client, datas): if not client.registered: client.registered = True client.name = datas client.send_message("clear_chat", "") client.send_message("get_identity", {"id" : client.id, "name" : client.name}) broadcast_message(f"{datas} à rejoint le chat !", "système") else: broadcast_message(datas, client.name) while True: Server.process_net_events()
557
0
67
43e6bb65360a0bc2d24721dd4aa6582d161eb065
2,575
py
Python
src/day8.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
src/day8.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
src/day8.py
chao-mu/aoc2021
9cb0590a8de100f260a78f22e50d00d6acb13ae4
[ "CC0-1.0" ]
null
null
null
#!/usr/bin/env python3 from aoc2021.util import print_solutions, import_strs from math import inf from itertools import permutations import copy Numbers = { "abcefg": 0, "cf": 1, "acdeg": 2, "acdfg": 3, "bcdf": 4, "abdfg": 5, "abdefg": 6, "acf": 7, "abcdefg": 8, "abcdfg": 9 } if __name__ == "__main__": main()
22.198276
76
0.553398
#!/usr/bin/env python3 from aoc2021.util import print_solutions, import_strs from math import inf from itertools import permutations import copy def part_1(clock): uniqs = 0 for face in clock: _, signals = face for signal in signals: if len(signal) in [2, 4, 3, 7]: uniqs += 1 return uniqs Numbers = { "abcefg": 0, "cf": 1, "acdeg": 2, "acdfg": 3, "bcdf": 4, "abdfg": 5, "abdefg": 6, "acf": 7, "abcdefg": 8, "abcdfg": 9 } def part_2(clock): total = 0 for face in clock: patterns, signals = face assignments = backward_search({c: "*" for c in "abcdefg"}, patterns) number = "" for s in signals: correct_s = "".join(sorted(assignments[c] for c in s)) number += str(Numbers[correct_s]) total += int(number) return total def backward_search(assignments, patterns): unassigned = None domain = set("abcdefg") for s, e in assignments.items(): if e == "*": unassigned = s else: domain.remove(e) if unassigned is None: return assignments for d in domain: local = copy.deepcopy(assignments) local[unassigned] = d if consistent(local, patterns): local = backward_search(local, patterns) if local is not None: return local return None def consistent(assignments, patterns): for pattern in patterns: new_str = "".join(assignments[s] for s in pattern) candidates = [s for s in Numbers if len(s) == len(new_str)] candidate_found = False for candidate in candidates: matched = True for idx, char in enumerate(new_str): if char != "*" and char not in candidate: matched = False break if matched: candidate_found = True if not candidate_found: return False return True def import_clock(path): clock = [] lines = import_strs(path) for line in lines: patterns, signals = line.strip().split(" | ") clock.append([patterns.split(" "), signals.split(" ")]) return clock def main(): print_solutions( ["resources/day8-test.txt", "resources/day8.txt"], import_clock, part_1 ) print_solutions( ["resources/day8-test.txt", "resources/day8.txt"], import_clock, part_2 ) if __name__ == "__main__": main()
2,076
0
138
4344fa9db3e3548e4b2450324536e75bc3e05d90
1,660
py
Python
CUT/experiments/__init__.py
Theomat/colorization-av-enseirb-2020
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
[ "Apache-2.0" ]
1,422
2020-07-31T00:31:19.000Z
2022-03-31T11:35:26.000Z
CUT/experiments/__init__.py
Theomat/colorization-av-enseirb-2020
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
[ "Apache-2.0" ]
123
2020-07-31T04:16:03.000Z
2022-03-21T14:02:20.000Z
CUT/experiments/__init__.py
Theomat/colorization-av-enseirb-2020
c54c2388ea39a62289fa2f1c51b4757bf55d3c4f
[ "Apache-2.0" ]
324
2020-07-31T00:40:11.000Z
2022-03-31T10:01:10.000Z
import os import importlib if __name__ == "__main__": import sys import pickle assert len(sys.argv) >= 3 name = sys.argv[1] Launcher = find_launcher_using_name(name) cache = "/tmp/tmux_launcher/{}".format(name) if os.path.isfile(cache): instance = pickle.load(open(cache, 'r')) else: instance = Launcher() cmd = sys.argv[2] if cmd == "launch": instance.launch() elif cmd == "stop": instance.stop() elif cmd == "send": expid = int(sys.argv[3]) cmd = int(sys.argv[4]) instance.send_command(expid, cmd) os.makedirs("/tmp/tmux_launcher/", exist_ok=True) pickle.dump(instance, open(cache, 'w'))
30.181818
80
0.633735
import os import importlib def find_launcher_using_name(launcher_name): # cur_dir = os.path.dirname(os.path.abspath(__file__)) # pythonfiles = glob.glob(cur_dir + '/**/*.py') launcher_filename = "experiments.{}_launcher".format(launcher_name) launcherlib = importlib.import_module(launcher_filename) # In the file, the class called LauncherNameLauncher() will # be instantiated. It has to be a subclass of BaseLauncher, # and it is case-insensitive. launcher = None target_launcher_name = launcher_name.replace('_', '') + 'launcher' for name, cls in launcherlib.__dict__.items(): if name.lower() == target_launcher_name.lower(): launcher = cls if launcher is None: raise ValueError("In %s.py, there should be a subclass of BaseLauncher " "with class name that matches %s in lowercase." % (launcher_filename, target_launcher_name)) return launcher if __name__ == "__main__": import sys import pickle assert len(sys.argv) >= 3 name = sys.argv[1] Launcher = find_launcher_using_name(name) cache = "/tmp/tmux_launcher/{}".format(name) if os.path.isfile(cache): instance = pickle.load(open(cache, 'r')) else: instance = Launcher() cmd = sys.argv[2] if cmd == "launch": instance.launch() elif cmd == "stop": instance.stop() elif cmd == "send": expid = int(sys.argv[3]) cmd = int(sys.argv[4]) instance.send_command(expid, cmd) os.makedirs("/tmp/tmux_launcher/", exist_ok=True) pickle.dump(instance, open(cache, 'w'))
927
0
23
f2c84250d49b0b78d8283962f1d175aafa4b43ad
2,366
py
Python
tools/gyp_jc3_handling_editor.py
xforce/jc3-handling-editor
48485e91ef3ce0e2849ae5f5aae1ef40f989c4c6
[ "MIT" ]
4
2016-11-25T12:45:35.000Z
2020-02-20T21:55:49.000Z
tools/gyp_jc3_handling_editor.py
xforce/jc3-handling-editor
48485e91ef3ce0e2849ae5f5aae1ef40f989c4c6
[ "MIT" ]
6
2016-11-21T19:32:47.000Z
2017-03-21T17:18:43.000Z
tools/gyp_jc3_handling_editor.py
xforce/jc3-handling-editor
48485e91ef3ce0e2849ae5f5aae1ef40f989c4c6
[ "MIT" ]
2
2016-11-20T21:05:41.000Z
2022-01-31T02:19:12.000Z
#!/usr/bin/env python import glob import os import shlex import sys import platform script_dir = os.path.dirname(__file__) jc3_handling_editor_root = os.path.normpath(os.path.join(script_dir, os.pardir)) sys.path.insert(0, os.path.abspath(os.path.join(jc3_handling_editor_root, 'tools'))) sys.path.insert(0, os.path.join(jc3_handling_editor_root, 'tools', 'gyp', 'pylib')) import gyp if __name__ == '__main__': args = sys.argv[1:] # GYP bug. if sys.platform == 'win32': args.append(os.path.join(jc3_handling_editor_root, 'jc3_handling_editor.gyp')) standalone_fn = os.path.join(jc3_handling_editor_root, 'standalone.gypi') toolchain_fn = os.path.join(jc3_handling_editor_root, 'toolchain.gypi') common_fn = os.path.join(jc3_handling_editor_root, 'common.gypi') options_fn = os.path.join(jc3_handling_editor_root, 'config.gypi') else: args.append(os.path.join(os.path.abspath(jc3_handling_editor_root), 'jc3_handling_editor.gyp')) standalone_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'standalone.gypi') toolchain_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'toolchain.gypi') common_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'common.gypi') options_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'config.gypi') if os.path.exists(standalone_fn): args.extend(['-I', standalone_fn]) if os.path.exists(toolchain_fn): args.extend(['-I', toolchain_fn]) if os.path.exists(common_fn): args.extend(['-I', common_fn]) if os.path.exists(options_fn): args.extend(['-I', options_fn]) args.append('--depth=' + jc3_handling_editor_root) #args.append('-Dcomponent=shared_library') #args.append('-Dlibrary=shared_library') gyp_args = list(args) print os.environ.get('GYP_GENERATORS') gyp_generators = os.environ.get('GYP_GENERATORS') #if platform.system() == 'Linux' and gyp_generators != 'ninja': # --generator-output defines where the Makefile goes. 
gyp_args.append('--generator-output=out') # -Goutput_dir defines where the build output goes, relative to the # Makefile. Set it to . so that the build output doesn't end up in out/out. gyp_args.append('-Goutput_dir=.') run_gyp(gyp_args)
35.313433
99
0.724852
#!/usr/bin/env python import glob import os import shlex import sys import platform script_dir = os.path.dirname(__file__) jc3_handling_editor_root = os.path.normpath(os.path.join(script_dir, os.pardir)) sys.path.insert(0, os.path.abspath(os.path.join(jc3_handling_editor_root, 'tools'))) sys.path.insert(0, os.path.join(jc3_handling_editor_root, 'tools', 'gyp', 'pylib')) import gyp def run_gyp(args): rc = gyp.main(args) if rc != 0: print 'Error running GYP' sys.exit(rc) if __name__ == '__main__': args = sys.argv[1:] # GYP bug. if sys.platform == 'win32': args.append(os.path.join(jc3_handling_editor_root, 'jc3_handling_editor.gyp')) standalone_fn = os.path.join(jc3_handling_editor_root, 'standalone.gypi') toolchain_fn = os.path.join(jc3_handling_editor_root, 'toolchain.gypi') common_fn = os.path.join(jc3_handling_editor_root, 'common.gypi') options_fn = os.path.join(jc3_handling_editor_root, 'config.gypi') else: args.append(os.path.join(os.path.abspath(jc3_handling_editor_root), 'jc3_handling_editor.gyp')) standalone_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'standalone.gypi') toolchain_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'toolchain.gypi') common_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'common.gypi') options_fn = os.path.join(os.path.abspath(jc3_handling_editor_root), 'config.gypi') if os.path.exists(standalone_fn): args.extend(['-I', standalone_fn]) if os.path.exists(toolchain_fn): args.extend(['-I', toolchain_fn]) if os.path.exists(common_fn): args.extend(['-I', common_fn]) if os.path.exists(options_fn): args.extend(['-I', options_fn]) args.append('--depth=' + jc3_handling_editor_root) #args.append('-Dcomponent=shared_library') #args.append('-Dlibrary=shared_library') gyp_args = list(args) print os.environ.get('GYP_GENERATORS') gyp_generators = os.environ.get('GYP_GENERATORS') #if platform.system() == 'Linux' and gyp_generators != 'ninja': # --generator-output defines where the Makefile goes. 
gyp_args.append('--generator-output=out') # -Goutput_dir defines where the build output goes, relative to the # Makefile. Set it to . so that the build output doesn't end up in out/out. gyp_args.append('-Goutput_dir=.') run_gyp(gyp_args)
80
0
23
04f5b502729ac86c25e910f36933883cca287f3f
627
py
Python
sliceprovider/ResourceManagement.py
fernnf/idc
6abf0e55467ab2b2cd3a22b869b3e0698ebfc5d2
[ "Apache-2.0" ]
null
null
null
sliceprovider/ResourceManagement.py
fernnf/idc
6abf0e55467ab2b2cd3a22b869b3e0698ebfc5d2
[ "Apache-2.0" ]
3
2018-08-16T14:35:11.000Z
2018-08-16T14:52:12.000Z
sliceprovider/ResourceManagement.py
fernnf/idc
6abf0e55467ab2b2cd3a22b869b3e0698ebfc5d2
[ "Apache-2.0" ]
null
null
null
""" Res. & VM Mgmt. component of the Infrastructure Management & Abstraction (IMA) Interacts with Docker to replicate the container from origin to destiny """ from docker import APIClient
27.26087
78
0.740032
""" Res. & VM Mgmt. component of the Infrastructure Management & Abstraction (IMA) Interacts with Docker to replicate the container from origin to destiny """ from docker import APIClient def replicateContainer(containerID, orig_url, dest_url): orig_client = APIClient(base_url=orig_url, tlf=False) dest_client = APIClient(base_url=dest_url, tls=False) image = orig_client.get_image(containerID) data=None for chunck in image: if data == None: data = chunck else: data += chunck dest_client.load_image(data) container = dest_client.create_container(containerID, name=containerID)
416
0
23
1207bd1d9849c7d09ef1136111f4fbe44b5966be
3,921
py
Python
src/parser.py
zhexiongliu/Nasdaq-ITCH-5.0-VWAP
aa78482171d2172d4feeb826b78e00dee195c181
[ "MIT" ]
9
2019-06-04T14:08:17.000Z
2021-11-24T12:02:57.000Z
src/parser.py
zhexiongliu/Nasdaq-ITCH-5.0-VWAP
aa78482171d2172d4feeb826b78e00dee195c181
[ "MIT" ]
null
null
null
src/parser.py
zhexiongliu/Nasdaq-ITCH-5.0-VWAP
aa78482171d2172d4feeb826b78e00dee195c181
[ "MIT" ]
2
2019-10-06T01:47:26.000Z
2022-01-10T13:20:58.000Z
import gzip import struct import datetime import pandas as pd import os if __name__ == '__main__': bin_data = gzip.open(os.path.join('..', 'res', '08302019.NASDAQ_ITCH50.gz'), 'rb') msg_header = bin_data.read(1) itch = ITCH() while msg_header: if msg_header == "S": message = itch.get_binary(11) elif msg_header == "R": message = itch.get_binary(38) elif msg_header == "H": message = itch.get_binary(24) elif msg_header == "Y": message = itch.get_binary(19) elif msg_header == "L": message = itch.get_binary(25) elif msg_header == "V": message = itch.get_binary(34) elif msg_header == "W": message = itch.get_binary(11) elif msg_header == "K": message = itch.get_binary(27) elif msg_header == "A": message = itch.get_binary(35) elif msg_header == "F": message = itch.get_binary(39) elif msg_header == "E": message = itch.get_binary(30) elif msg_header == "C": message = itch.get_binary(35) elif msg_header == "X": message = itch.get_binary(22) elif msg_header == "D": message = itch.get_binary(18) elif msg_header == "U": message = itch.get_binary(34) elif msg_header == "P": message = itch.get_binary(43) itch.get_vwap(message) elif msg_header == "Q": message = itch.get_binary(39) elif msg_header == "B": message = itch.get_binary(18) elif msg_header == "I": message = itch.get_binary(49) elif msg_header == "N": message = itch.get_binary(19) msg_header = bin_data.read(1) bin_data.close()
29.261194
115
0.530477
import gzip import struct import datetime import pandas as pd import os class ITCH(): def __init__(self): self.temp = [] self.flag = None if not os.path.exists(os.path.join('.', 'output')): os.makedirs(os.path.join('.', 'output')) def get_binary(self, size): read = bin_data.read(size) return read def convert_time(self, stamp): time = datetime.datetime.fromtimestamp(stamp / 1e9) time = time.strftime('%H:%M:%S') return time def cal_vwap(self, df): df['amount'] = df['price'] * df['volume'] df['time'] = pd.to_datetime(df['time']) df = df.groupby([df['time'].dt.hour, df['symbol']])['amount', 'volume'].sum() df['vwap'] = df['amount'] / df['volume'] df['vwap'] = df['vwap'].round(2) df = df.reset_index() df['time'] = df.apply(lambda x: str(x['time']) + ':00:00', axis=1) df = df[['time', 'symbol', 'vwap']] return df def get_vwap(self, message): parsed_data, hour = self.trade_message(message) if self.flag is None: self.flag = hour if self.flag != hour: df = pd.DataFrame(self.temp, columns=['time', 'symbol', 'price', 'volume']) result = self.cal_vwap(df) result.to_csv(os.path.join('..', 'output', str(self.flag) + '.txt'), sep=' ', index=False) print(result) self.temp = [] self.flag = hour self.temp.append(parsed_data) def trade_message(self, msg): msg_type = 'P' temp = struct.unpack('>4s6sQcI8cIQ', msg) new_msg = struct.pack('>s4s2s6sQsI8sIQ', msg_type, temp[0], '\x00\x00', temp[1], temp[2], temp[3], temp[4], ''.join(list(temp[5:13])), temp[13], temp[14]) value = struct.unpack('>sHHQQsI8sIQ', new_msg) value = list(value) value[3] = self.convert_time(value[3]) value[7] = value[7].strip() value[8] = float(value[8]) value[8] = value[8] / 10000 return [value[3], value[7], value[8], value[6]], value[3].split(':')[0] if __name__ == '__main__': bin_data = gzip.open(os.path.join('..', 'res', '08302019.NASDAQ_ITCH50.gz'), 'rb') msg_header = bin_data.read(1) itch = ITCH() while msg_header: if msg_header == "S": message = itch.get_binary(11) elif msg_header == "R": message = 
itch.get_binary(38) elif msg_header == "H": message = itch.get_binary(24) elif msg_header == "Y": message = itch.get_binary(19) elif msg_header == "L": message = itch.get_binary(25) elif msg_header == "V": message = itch.get_binary(34) elif msg_header == "W": message = itch.get_binary(11) elif msg_header == "K": message = itch.get_binary(27) elif msg_header == "A": message = itch.get_binary(35) elif msg_header == "F": message = itch.get_binary(39) elif msg_header == "E": message = itch.get_binary(30) elif msg_header == "C": message = itch.get_binary(35) elif msg_header == "X": message = itch.get_binary(22) elif msg_header == "D": message = itch.get_binary(18) elif msg_header == "U": message = itch.get_binary(34) elif msg_header == "P": message = itch.get_binary(43) itch.get_vwap(message) elif msg_header == "Q": message = itch.get_binary(39) elif msg_header == "B": message = itch.get_binary(18) elif msg_header == "I": message = itch.get_binary(49) elif msg_header == "N": message = itch.get_binary(19) msg_header = bin_data.read(1) bin_data.close()
1,886
-8
185
c5ab311027e4d350a3a36cc33e8cee4864e34548
198
py
Python
4.py
lzbferrari/ljb_python
e545891c707e412bb12601c94b456be721c85573
[ "MIT" ]
null
null
null
4.py
lzbferrari/ljb_python
e545891c707e412bb12601c94b456be721c85573
[ "MIT" ]
null
null
null
4.py
lzbferrari/ljb_python
e545891c707e412bb12601c94b456be721c85573
[ "MIT" ]
null
null
null
''' 创造一个变量他等于5如果这个变量小于10会打印我打得过那些忍者,小于30会打印有点难,不过我能应付如果小于50会打印太多了 ''' ninjas = 5 if ninjas < 10: print('我打得过那些忍者') elif ninjas < 30: print('有点难,不过我能应付') elif ninjas < 50: print('太多了')
15.230769
61
0.681818
''' 创造一个变量他等于5如果这个变量小于10会打印我打得过那些忍者,小于30会打印有点难,不过我能应付如果小于50会打印太多了 ''' ninjas = 5 if ninjas < 10: print('我打得过那些忍者') elif ninjas < 30: print('有点难,不过我能应付') elif ninjas < 50: print('太多了')
0
0
0
0d8cdabb6bfa49bc5a270e7bc3fa38275087f891
1,273
py
Python
tests/ut/python/parallel/test_auto_parallel_inference.py
doc22940/mindspore
21bcdcd8adb97b9171b2822a7ed2c4c138c99607
[ "Apache-2.0" ]
1
2020-05-13T11:31:21.000Z
2020-05-13T11:31:21.000Z
tests/ut/python/parallel/test_auto_parallel_inference.py
doc22940/mindspore
21bcdcd8adb97b9171b2822a7ed2c4c138c99607
[ "Apache-2.0" ]
null
null
null
tests/ut/python/parallel/test_auto_parallel_inference.py
doc22940/mindspore
21bcdcd8adb97b9171b2822a7ed2c4c138c99607
[ "Apache-2.0" ]
null
null
null
import numpy as np import mindspore.nn as nn from mindspore import Tensor, context from mindspore.ops import operations as P from mindspore.nn import WithLossCell, TrainOneStepCell from mindspore.nn import Momentum from mindspore.parallel._cost_model_context import set_cost_model_context
34.405405
88
0.728987
import numpy as np import mindspore.nn as nn from mindspore import Tensor, context from mindspore.ops import operations as P from mindspore.nn import WithLossCell, TrainOneStepCell from mindspore.nn import Momentum from mindspore.parallel._cost_model_context import set_cost_model_context class Net(nn.Cell): def __init__(self, input_ch, out_ch): super(Net, self).__init__() self.dense = nn.Dense(input_ch, out_ch) self.relu = P.ReLU() def construct(self, x): x = self.dense(x) x = self.relu(x) return x def test_inference_phase(): context.set_auto_parallel_context(device_num=8, global_rank=0) context.set_auto_parallel_context(parallel_mode="auto_parallel") set_cost_model_context(run_phase=1) net = Net(512, 128) predict = Tensor(np.ones([64, 512]).astype(np.float32) * 0.001) label = Tensor(np.ones([64, 128]).astype(np.float32)) loss = nn.SoftmaxCrossEntropyWithLogits() optimizer = Momentum(params=net.trainable_params(), learning_rate=0.1, momentum=0.9) net_with_loss = WithLossCell(net, loss) train_network = TrainOneStepCell(net_with_loss, optimizer) train_network.set_train() train_network.set_auto_parallel() output = train_network(predict, label)
887
-2
99
f9da18cdb4b8afbbf3b00ddbb3ca9a4c8a07f66c
2,043
py
Python
agua_amiga/gui/streak_window.py
mpnordland/agua_amiga
abbfaceea6aa90332a7782c2ea6d01f1fec1d41a
[ "MIT" ]
2
2022-01-04T10:35:24.000Z
2022-02-02T21:51:25.000Z
agua_amiga/gui/streak_window.py
mpnordland/agua_amiga
abbfaceea6aa90332a7782c2ea6d01f1fec1d41a
[ "MIT" ]
null
null
null
agua_amiga/gui/streak_window.py
mpnordland/agua_amiga
abbfaceea6aa90332a7782c2ea6d01f1fec1d41a
[ "MIT" ]
null
null
null
from gi.repository import GLib, Gio, Gtk from datetime import datetime, timedelta, date from agua_amiga.datastore import Datastore, convert_from_display_to_mL, convert_from_mL_to_display @Gtk.Template.from_file("ui_definitions/StreakWindow.glade")
38.54717
160
0.657367
from gi.repository import GLib, Gio, Gtk from datetime import datetime, timedelta, date from agua_amiga.datastore import Datastore, convert_from_display_to_mL, convert_from_mL_to_display @Gtk.Template.from_file("ui_definitions/StreakWindow.glade") class StreakWindow(Gtk.Window): __gtype_name__ = "StreakWindow" calendar_streaks: Gtk.Button = Gtk.Template.Child() def __init__(self, *args, **kwargs) -> None: assert 'datastore' in kwargs.keys() self.datastore: Datastore = kwargs['datastore'] del kwargs['datastore'] super().__init__(*args, **kwargs) self.update_marked_days() def update_marked_days(self): self.calendar_streaks.clear_marks() month_start = date(self.calendar_streaks.get_property('year'), self.calendar_streaks.get_property('month') + 1, 1) if month_start.month < 12: month_end = month_start.replace(month=month_start.month + 1) - timedelta(days=1) else: month_end = date(month_start.year + 1, 1, 1) display_units = self.datastore.get_display_units() days = self.datastore.get_days_drunk_water(month_start, month_end) if days is not None: water_data = dict(days) for day in days: parsed_day = date.fromisoformat(day[0]) self.calendar_streaks.mark_day(parsed_day.day) def detail_func(widget, year, month, day): try: day_volume_drunk = water_data[f"{year}-{month+1:2}-{day:2}"] return f"<span size=\"medium\" line_height=\"2\">{convert_from_mL_to_display(day_volume_drunk, display_units)} {display_units.value}</span>" except Exception as e: return f"<span size=\"medium\" line_height=\"2\">0 {display_units.value}</span>" self.calendar_streaks.set_detail_func(detail_func) @Gtk.Template.Callback() def calendar_streaks_month_changed_cb(self, widget, **_kwargs): self.update_marked_days()
1,555
217
22
1102cf09a961527fe7f8f6eccb81022c752e0bfc
957
py
Python
subset.py
zygmuntz/phraug2
ef2a6d42008b31024407208d2b9798ddddc2b7a5
[ "BSD-2-Clause" ]
190
2015-01-14T15:32:30.000Z
2022-02-17T08:18:27.000Z
subset.py
zygmuntz/phraug2
ef2a6d42008b31024407208d2b9798ddddc2b7a5
[ "BSD-2-Clause" ]
6
2015-04-24T17:07:08.000Z
2020-01-13T10:07:37.000Z
subset.py
zygmuntz/phraug2
ef2a6d42008b31024407208d2b9798ddddc2b7a5
[ "BSD-2-Clause" ]
69
2015-01-25T19:49:42.000Z
2021-02-10T05:24:34.000Z
'Save a subset of lines from an input file; start at offset and count n lines' 'default 100 lines starting from 0' import sys import argparse parser = argparse.ArgumentParser( description = "Save a subset of lines from the input file to the output file" ) parser.add_argument( "input_file", help = "path to input file" ) parser.add_argument( "output_file", help = "path to output file" ) parser.add_argument( "-o", "--offset", help = "line number to start from, default 0", type = int, default = 0 ) parser.add_argument( "-l", "--lines", help = "number of lines to write, default 100", type = int, default = 100 ) args = parser.parse_args() try: lines = int( sys.argv[4] ) except IndexError: lines = 100 i = open( args.input_file ) o = open( args.output_file, 'wb' ) offset = args.offset count = 0 for line in i: if offset > 0: offset -= 1 continue o.write( line ) count += 1 if count >= args.lines: break
19.9375
113
0.668757
'Save a subset of lines from an input file; start at offset and count n lines' 'default 100 lines starting from 0' import sys import argparse parser = argparse.ArgumentParser( description = "Save a subset of lines from the input file to the output file" ) parser.add_argument( "input_file", help = "path to input file" ) parser.add_argument( "output_file", help = "path to output file" ) parser.add_argument( "-o", "--offset", help = "line number to start from, default 0", type = int, default = 0 ) parser.add_argument( "-l", "--lines", help = "number of lines to write, default 100", type = int, default = 100 ) args = parser.parse_args() try: lines = int( sys.argv[4] ) except IndexError: lines = 100 i = open( args.input_file ) o = open( args.output_file, 'wb' ) offset = args.offset count = 0 for line in i: if offset > 0: offset -= 1 continue o.write( line ) count += 1 if count >= args.lines: break
0
0
0
6e26e1f0af0b391c2d44a64eeef08c7927ec934b
13,401
py
Python
pygw2/core/models/character.py
Natsku123/pygw2
c8e579c07f4d33c7afadc8dee510be0a5e71d16e
[ "MIT" ]
1
2020-01-19T21:27:53.000Z
2020-01-19T21:27:53.000Z
pygw2/core/models/character.py
Natsku123/pygw2
c8e579c07f4d33c7afadc8dee510be0a5e71d16e
[ "MIT" ]
1
2021-12-09T21:18:13.000Z
2021-12-09T21:18:13.000Z
pygw2/core/models/character.py
Natsku123/pygw2
c8e579c07f4d33c7afadc8dee510be0a5e71d16e
[ "MIT" ]
null
null
null
import datetime from typing import Optional, List, Union, TYPE_CHECKING from pygw2.core.enums import * from pygw2.utils import LazyLoader, BaseModel if TYPE_CHECKING: from pygw2.core.models.pvp import PvPEquipment from pygw2.core.models.sab import SAB from pygw2.core.models.guild import Guild from pygw2.core.models.misc import Title, Color from pygw2.core.models.backstory import BiographyAnswer from pygw2.core.models.items import Item from pygw2.core.models.general import Skin, ItemStat from pygw2.core.models.wvw import WvWAbility
23.428322
85
0.676367
import datetime from typing import Optional, List, Union, TYPE_CHECKING from pygw2.core.enums import * from pygw2.utils import LazyLoader, BaseModel if TYPE_CHECKING: from pygw2.core.models.pvp import PvPEquipment from pygw2.core.models.sab import SAB from pygw2.core.models.guild import Guild from pygw2.core.models.misc import Title, Color from pygw2.core.models.backstory import BiographyAnswer from pygw2.core.models.items import Item from pygw2.core.models.general import Skin, ItemStat from pygw2.core.models.wvw import WvWAbility class Crafting(BaseModel): discipline: Discipline rating: int active: bool class Attributes(BaseModel): Power: Optional[int] = 0 Precision: Optional[int] = 0 Toughness: Optional[int] = 0 Vitality: Optional[int] = 0 ConditionDamage: Optional[int] = 0 ConditionDuration: Optional[int] = 0 Healing: Optional[int] = 0 BoonDuration: Optional[int] = 0 class Stats(BaseModel): id: int values_: LazyLoader @property def values(self) -> "ItemStat": return self.values_() attributes: Attributes class Equipment(BaseModel): id: int item_: LazyLoader tabs: Optional[List[int]] @property def item(self) -> "Item": return self.item_() slot: Optional[EquipmentSlot] location: EquipmentLocation infusions_: Optional[LazyLoader] @property def infusions(self) -> List["Item"]: return self.infusions_() if self.infusions_ is not None else None upgrades_: Optional[LazyLoader] @property def upgrades(self) -> List["Item"]: return self.upgrades_() if self.upgrades_ is not None else None skin_: Optional[LazyLoader] @property def skin(self) -> List["Skin"]: return self.skin_() if self.skin_ is not None else None stats: Optional[Stats] = None binding: Optional[Binding] = None charges: Optional[int] = None bound_to: Optional[str] = None dyes_: Optional[LazyLoader] @property def dyes(self) -> List["Color"]: return self.dyes_() if self.dyes_ is not None else None class ItemInventory(BaseModel): id: int item_: LazyLoader @property def item(self) -> "Item": return self.item_() 
count: int infusions_: Optional[LazyLoader] @property def infusions(self) -> List["Item"]: return self.infusions_() if self.infusions_ is not None else None upgrades_: Optional[LazyLoader] @property def upgrades(self) -> List["Item"]: return self.upgrades_() if self.upgrades_ is not None else None skin_: Optional[LazyLoader] @property def skin(self) -> List["Skin"]: return self.skin_() if self.skin_ is not None else None stats: Optional[Stats] = None binding: Optional[Binding] = None bound_to: Optional[str] = None class Bag(BaseModel): id: int item_: LazyLoader @property def item(self) -> "Item": return self.item_() size: int inventory: List[Union[ItemInventory, None]] class SkillsBase(BaseModel): heal_: Optional[LazyLoader] @property def heal(self) -> "Skill": return self.heal_() if self.heal_ else None utilities_: Optional[LazyLoader] @property def utilities(self) -> List["Skill"]: return self.utilities_() if self.utilities_ else None elite_: Optional[LazyLoader] @property def elite(self): return self.elite_() if self.elite_ else None legends_: Optional[LazyLoader] @property def legends(self): return self.legends_() if self.legends_ is not None else None class SkillTree(BaseModel): id: int # TODO compared against the training section for each /v2/professions spent: int done: bool class Skills(BaseModel): pve: SkillsBase pvp: SkillsBase wvw: SkillsBase class SpecializationBase(BaseModel): id: Optional[int] specialization_: Optional[LazyLoader] @property def specialization(self) -> "Specialization": return self.specialization_() if self.specialization_ else None traits_: Optional[LazyLoader] @property def traits(self) -> List["Trait"]: return self.traits_() if self.traits_ else None class Specializations(BaseModel): pve: SpecializationBase pvp: SpecializationBase wvw: SpecializationBase class CharacterCore(BaseModel): name: str race: Races gender: Gender profession: Professions level: int guild_: Optional[LazyLoader] = None @property def guild(self) -> "Guild": 
return self.guild_() if self.guild_ is not None else None age: int created: datetime.datetime deaths: int title_: Optional[LazyLoader] = None @property def title(self) -> "Title": return self.title_() if self.title_ is not None else None class Build(BaseModel): name: str profession: Professions specializations: List[SpecializationBase] skills: SkillsBase aquatic_skills: SkillsBase class BuildTab(BaseModel): tab: int is_active: bool build: Build class EquipmentTab(BaseModel): tab: int name: str is_active: bool equipment: List["Equipment"] equipment_pvp: "PvPEquipment" class Character(BaseModel): name: str race: Races gender: Gender profession: Professions level: int build_tabs_unlocked: int active_build_tab: int equipment_tabs_unlocked: int active_equipment_tab: int guild_: Optional[LazyLoader] = None @property def guild(self) -> "Guild": return self.guild_() if self.guild_ is not None else None age: int created: datetime.datetime deaths: int title_: Optional[LazyLoader] = None @property def title(self) -> "Title": return self.title_() if self.title_ is not None else None backstory_: LazyLoader @property def backstory(self) -> List["BiographyAnswer"]: return self.backstory_() crafting: List["Crafting"] equipment: List["Equipment"] build_tabs: List["BuildTab"] equipment_tabs: List["EquipmentTab"] heropoints_: LazyLoader @property def heropoints(self) -> List["str"]: # TODO replace with proper lookup return self.heropoints_() bags: List[Optional[Bag]] = [] @property def inventory(self) -> List[Optional[Bag]]: return self.bags training: List[SkillTree] sab_: LazyLoader @property def sab(self) -> "SAB": return self.sab_() wvw_abilities: List["CharacterWvWAbility"] flags: List[CharacterFlag] = [] class CharacterWvWAbility(BaseModel): id: int ability_: LazyLoader @property def ability(self) -> "WvWAbility": return self.ability_() rank: int class ProfessionTrainingTrack(BaseModel): cost: int = 0 type: ProfessionTrainingTrackType skill_id: Optional[int] skill_: 
Optional[LazyLoader] @property def skill(self) -> Optional["Skill"]: return self.skill_() if self.skill_ is not None else None trait_id: Optional[int] trait_: Optional[LazyLoader] @property def trait(self) -> Optional["Trait"]: return self.trait_() if self.trait_ is not None else None class ProfessionTraining(BaseModel): id: int skill_: LazyLoader @property def skill(self) -> "Skill": return self.skill_() specialization_: LazyLoader @property def specialization(self) -> "Specialization": return self.specialization_() category: ProfessionTrainingCategory name: str track: List[ProfessionTrainingTrack] class WeaponSkill(BaseModel): id: int skill_: LazyLoader @property def skill(self) -> "Skill": return self.skill_() slot: SkillSlot offhand: Optional[str] attunement: Optional[str] source: Optional[str] class ProfessionWeapon(BaseModel): flag: Optional[List[ProfessionWeaponFlag]] specialization_: Optional[LazyLoader] @property def specialization(self) -> Optional["Specialization"]: return self.specialization_() if self.specialization_ is not None else None skills: List[WeaponSkill] class ProfessionWeapons(BaseModel): Axe: Optional["ProfessionWeapon"] Dagger: Optional["ProfessionWeapon"] Mace: Optional["ProfessionWeapon"] Pistol: Optional["ProfessionWeapon"] Sword: Optional["ProfessionWeapon"] Scepter: Optional["ProfessionWeapon"] Focus: Optional["ProfessionWeapon"] Shield: Optional["ProfessionWeapon"] Torch: Optional["ProfessionWeapon"] Warhorn: Optional["ProfessionWeapon"] Greatsword: Optional["ProfessionWeapon"] Hammer: Optional["ProfessionWeapon"] Longbow: Optional["ProfessionWeapon"] Rifle: Optional["ProfessionWeapon"] Shortbow: Optional["ProfessionWeapon"] Staff: Optional["ProfessionWeapon"] Speargun: Optional["ProfessionWeapon"] Spear: Optional["ProfessionWeapon"] Trident: Optional["ProfessionWeapon"] class Profession(BaseModel): id: str = "" name: str = "" icon: str = "" icon_big: str = "" specializations_: LazyLoader @property def specializations(self) -> 
List["Specialization"]: return self.specializations_() training: List[ProfessionTraining] weapons: ProfessionWeapons class Race(BaseModel): id: str skills_: LazyLoader @property def skills(self) -> List["Skill"]: return self.skills_() class Specialization(BaseModel): id: int name: str profession: str elite: bool = False icon: str background: str minor_traits_: LazyLoader @property def minor_traits(self) -> List["Trait"]: return self.minor_traits_() major_traits_: LazyLoader @property def major_traits(self) -> List["Trait"]: return self.major_traits_() class SkillFactPrefix(BaseModel): text: str icon: str status: str description: str class SkillFact(BaseModel): text: Optional[str] icon: Optional[str] type: SkillFactType value: Optional[Union[int, bool]] target: Optional[str] status: Optional[str] description: Optional[str] apply_count: Optional[int] duration: Optional[int] field_type: Optional[ComboFieldType] finisher_type: Optional[ComboFinisherType] percent: Optional[int] hit_count: Optional[int] dmg_multiplier: Optional[int] distance: Optional[int] prefix: Optional[SkillFactPrefix] class SkillTraitedFact(SkillFact): requires_trait_: LazyLoader @property def requires_trait(self) -> "Trait": return self.requires_trait_() overrides: Optional[int] # TODO resolve from facts class Skill(BaseModel): id: int name: str description: Optional[str] icon: str chat_link: str type: Optional[SkillType] weapon_type: Optional[WeaponType] professions: List[Professions] slot: SkillSlot facts: Optional[List[SkillFact]] traited_facts: Optional[List[SkillTraitedFact]] categories: Optional[List[SkillCategories]] attunement: Optional[Attunement] cost: Optional[int] dual_wield: Optional[str] flip_skill_: Optional[LazyLoader] @property def flip_skill(self) -> Optional["Skill"]: return self.flip_skill_() if self.flip_skill_ is not None else None initiative: Optional[int] next_chain_: Optional[LazyLoader] @property def next_chain(self) -> Optional["Skill"]: return self.next_chain_() if 
self.next_chain_ is not None else None prev_chain_: Optional[LazyLoader] @property def prev_chain(self) -> Optional["Skill"]: return self.prev_chain_ if self.prev_chain_ is not None else None transform_skills_: Optional[LazyLoader] @property def transform_skills(self) -> Optional[List["Skill"]]: return self.transform_skills_ if self.transform_skills_ is not None else None bundle_skills_: Optional[LazyLoader] @property def bundle_skills(self) -> Optional[List["Skill"]]: return self.bundle_skills_ if self.bundle_skills_ is not None else None toolbelt_skill_: Optional[LazyLoader] @property def toolbelt_skill(self) -> Optional["Skill"]: return self.toolbelt_skill_ if self.toolbelt_skill_ is not None else None class TraitSkill(BaseModel): id: int name: str description: str icon: str facts: Optional[List[SkillFact]] traited_facts: Optional[List[SkillTraitedFact]] class Trait(BaseModel): id: int name: str icon: str description: str specialization_: LazyLoader @property def specialization(self) -> "Specialization": return self.specialization_() tier: TraitTier slot: TraitSlot facts: Optional[List[SkillFact]] traited_facts: Optional[List[SkillTraitedFact]] skills: Optional[List[TraitSkill]] class Legend(BaseModel): id: str = "" swap_: LazyLoader @property def swap(self) -> "Skill": return self.swap_() heal_: LazyLoader @property def heal(self) -> "Skill": return self.heal_() elite_: LazyLoader @property def elite(self) -> "Skill": return self.elite_() utilities_: LazyLoader @property def utilities(self) -> List["Skill"]: return self.utilities_()
3,203
8,859
736
531964ba161ada42348b3db5388f68060006c2ec
2,423
py
Python
plotter.py
BartlomiejF/ParHumidSensor
4782a0ae646c13d037fbe9bceda2d6ec5c440b72
[ "MIT" ]
null
null
null
plotter.py
BartlomiejF/ParHumidSensor
4782a0ae646c13d037fbe9bceda2d6ec5c440b72
[ "MIT" ]
null
null
null
plotter.py
BartlomiejF/ParHumidSensor
4782a0ae646c13d037fbe9bceda2d6ec5c440b72
[ "MIT" ]
null
null
null
import pandas as pd import matplotlib.pyplot as plt from io import BytesIO import base64
41.775862
96
0.666116
import pandas as pd import matplotlib.pyplot as plt from io import BytesIO import base64 def plot(airqualjson=None, temphumidjson=None): # carnb_data = pd.DataFrame.from_dict(carnbjson) figdata_airqual_png = None figdata_temphumid_png = None if airqualjson is not None: airqual_data = pd.DataFrame.from_dict(airqualjson) airqual_time = airqual_data["date_time"].astype("datetime64") fig_airqual, ax_airqual = plt.subplots(1, 2, figsize=(10, 5)) ax_airqual[0].plot(airqual_time, airqual_data["pm2_5"]) ax_airqual[0].axhline(20, ls="--", c="r") ax_airqual[0].axhspan(0, 20, 0, 1, color="g", alpha=0.2) ax_airqual[0].set_title("pm 2.5") axymax = airqual_data["pm2_5"].max() axymax = axymax*1.1 if axymax > 20: ax_airqual[0].axhspan(20, axymax, 0, 1, color="r", alpha=0.2) ax_airqual[1].plot(airqual_time, airqual_data["pm10"]) ax_airqual[1].axhline(50, ls="--", c="r") ax_airqual[1].axhspan(0, 50, 0, 1, color="g", alpha=0.2) ax_airqual[1].set_title("pm 10") ax1ymax = airqual_data["pm10"].max() ax1ymax = ax1ymax*1.1 if ax1ymax > 50: ax_airqual[1].axhspan(50, ax1ymax, 0, 1, color="r", alpha=0.2) figfile_airqual = BytesIO() fig_airqual.savefig(figfile_airqual, format='png') figfile_airqual.seek(0) # rewind to beginning of file figdata_airqual_png = figfile_airqual.getvalue() # extract string (stream of bytes) figdata_airqual_png = base64.b64encode(figdata_airqual_png) if temphumidjson is not None: temphumid_data = pd.DataFrame.from_dict(temphumidjson) temphumid_time = temphumid_data["date_time"].astype("datetime64") fig_temphumid, ax_temphumid = plt.subplots(1, 2, figsize=(10, 5)) ax_temphumid[0].plot(temphumid_time, temphumid_data["temperature"]) ax_temphumid[1].plot(temphumid_time, temphumid_data["humidity"]) figfile_temphumid = BytesIO() fig_temphumid.savefig(figfile_temphumid, format='png') figfile_temphumid.seek(0) # rewind to beginning of file figdata_temphumid_png = figfile_temphumid.getvalue() # extract string (stream of bytes) figdata_temphumid_png = 
base64.b64encode(figdata_temphumid_png) return {"airqual": figdata_airqual_png, "temphumid": figdata_temphumid_png,}
2,310
0
23
0c09238973033b093fa0969c6f3f3f36233309e5
1,906
py
Python
src/sentry/api/bases/team.py
augustand/sentry
9e5e0b6744d878d623f483661e40c49bb902047c
[ "BSD-3-Clause" ]
1
2021-08-10T06:07:13.000Z
2021-08-10T06:07:13.000Z
src/sentry/api/bases/team.py
augustand/sentry
9e5e0b6744d878d623f483661e40c49bb902047c
[ "BSD-3-Clause" ]
5
2019-12-28T18:13:59.000Z
2022-03-02T04:32:45.000Z
src/sentry/api/bases/team.py
augustand/sentry
9e5e0b6744d878d623f483661e40c49bb902047c
[ "BSD-3-Clause" ]
1
2017-04-08T04:09:18.000Z
2017-04-08T04:09:18.000Z
from __future__ import absolute_import from sentry.api.base import Endpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.app import raven from sentry.models import Team, TeamStatus from sentry.models.apikey import ROOT_KEY from .organization import OrganizationPermission
31.245902
83
0.643757
from __future__ import absolute_import from sentry.api.base import Endpoint from sentry.api.exceptions import ResourceDoesNotExist from sentry.app import raven from sentry.models import Team, TeamStatus from sentry.models.apikey import ROOT_KEY from .organization import OrganizationPermission class TeamPermission(OrganizationPermission): scope_map = { 'GET': ['team:read', 'team:write', 'team:delete'], 'POST': ['team:write', 'team:delete'], 'PUT': ['team:write', 'team:delete'], 'DELETE': ['team:delete'], } def has_object_permission(self, request, view, team): result = super(TeamPermission, self).has_object_permission( request, view, team.organization) if not result: return result if not (request.user and request.user.is_authenticated()) and request.auth: if request.auth is ROOT_KEY: return True return request.auth.organization_id == team.organization.id allowed_scopes = set(self.scope_map.get(request.method, [])) return any( request.access.has_team_scope(team, s) for s in allowed_scopes, ) class TeamEndpoint(Endpoint): permission_classes = (TeamPermission,) def convert_args(self, request, organization_slug, team_slug, *args, **kwargs): try: team = Team.objects.filter( organization__slug=organization_slug, slug=team_slug, ).select_related('organization').get() except Team.DoesNotExist: raise ResourceDoesNotExist if team.status != TeamStatus.VISIBLE: raise ResourceDoesNotExist self.check_object_permissions(request, team) raven.tags_context({ 'organization': team.organization_id, }) kwargs['team'] = team return (args, kwargs)
1,222
340
46
8e5dd8d936892226fc66c24989ea52dcf046dedb
663
py
Python
PyNEng/To-Do Task 5.2b.py
fugrusha/education
89712a93e0727e94543cb252482920a503135c7f
[ "MIT" ]
null
null
null
PyNEng/To-Do Task 5.2b.py
fugrusha/education
89712a93e0727e94543cb252482920a503135c7f
[ "MIT" ]
null
null
null
PyNEng/To-Do Task 5.2b.py
fugrusha/education
89712a93e0727e94543cb252482920a503135c7f
[ "MIT" ]
null
null
null
# Задание 5.2b london_co = { 'r1' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '4451', 'ios': '15.4', 'ip': '10.255.0.1' }, 'r2' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '4451', 'ios': '15.4', 'ip': '10.255.0.2' }, 'sw1' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '3850', 'ios': '3.6.XE', 'ip': '10.255.0.101', 'vlans': '10,20,30', 'routing': True } } device = input('Enter divece name: ') key = dict(device).keys() param = input('Enter parameter name {}: '.format(key)) print(london_co[device][param])
20.090909
54
0.493213
# Задание 5.2b london_co = { 'r1' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '4451', 'ios': '15.4', 'ip': '10.255.0.1' }, 'r2' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '4451', 'ios': '15.4', 'ip': '10.255.0.2' }, 'sw1' : { 'location': '21 New Globe Walk', 'vendor': 'Cisco', 'model': '3850', 'ios': '3.6.XE', 'ip': '10.255.0.101', 'vlans': '10,20,30', 'routing': True } } device = input('Enter divece name: ') key = dict(device).keys() param = input('Enter parameter name {}: '.format(key)) print(london_co[device][param])
0
0
0
86bc01aa6a0d9b9a74b765460b3330e59f59daef
1,857
py
Python
src/snovault/elasticsearch/searches/interfaces.py
caseylitton/snovault
cfaace963bc9b1ca87bca4739b380c559026bcfb
[ "MIT" ]
null
null
null
src/snovault/elasticsearch/searches/interfaces.py
caseylitton/snovault
cfaace963bc9b1ca87bca4739b380c559026bcfb
[ "MIT" ]
null
null
null
src/snovault/elasticsearch/searches/interfaces.py
caseylitton/snovault
cfaace963bc9b1ca87bca4739b380c559026bcfb
[ "MIT" ]
null
null
null
ADVANCED_QUERY_KEY = 'advancedQuery' ALL = 'all' AND = 'AND' AND_JOIN = ' AND ' AND_NOT_JOIN = ' AND NOT ' APPENDED = 'appended' APPLICATION_JSON = 'application/json' ASC = 'asc' AT_ID = '@id' AT_CONTEXT = '@context' AT_TYPE = '@type' AUDIT = 'audit' AUDIT_TITLE = 'Audit' BOOL = 'bool' BOOST_VALUES = 'boost_values' BUCKETS = 'buckets' CLEAR_FILTERS = 'clear_filters' COLLECTION_NAME = 'item_type' COLUMNS = 'columns' DASH = '-' DEBUG_KEY = 'debug' DESC = 'desc' DOC_COUNT = 'doc_count' EMBEDDED = 'embedded.' EMBEDDED_FRAME = 'embedded' EMBEDDED_TYPE = 'embedded.@type' EXCLUDE = 'exclude' EXISTS = 'exists' FACETS = 'facets' FIELD_KEY = 'field' FILTERS = 'filters' FRAME_KEY = 'frame' FROM_KEY = 'from' GRAPH = '@graph' GROUP_BY = 'group_by' GROUP_SUBMITTER = 'group.submitter' HITS = 'hits' ITEM = 'Item' JS_IS_EQUAL = 'isEqual' JS_TRUE = 'true' JS_FALSE = 'false' JSONLD_CONTEXT = 'jsonld_context' KEY = 'key' LIMIT_KEY = 'limit' LENGTH = 'length' LONG = 'long' MATRIX = 'matrix' MATRIX_TITLE = 'Matrix' MODE_KEY = 'mode' NO = 'no' NO_LIMIT = 999999 NO_RESULTS_FOUND = 'No results found' NON_SORTABLE = 'non_sortable' NOT_JOIN = ' NOT ' NOT_FLAG = '!' NOTIFICATION = 'notification' OBJECT_FRAME = 'object' ORDER = 'order' PERIOD = '.' PICKER = 'picker' PRINCIPALS_ALLOWED_VIEW = 'principals_allowed.view' PROPERTIES = 'properties' QUERY_STRING = 'query_string' RAW_QUERY = 'raw_query' REMOVE = 'remove' REPORT_TITLE = 'Report' SEARCH_AUDIT = 'search_audit' SEARCH_BASE = 'search_base' SEARCH_PATH = '/search/' SEARCH_TITLE = 'Search' SEARCH_TERM_KEY = 'searchTerm' SIMPLE_QUERY_STRING = 'simple_query_string' SORT_KEY = 'sort' _SOURCE = '_source' SUCCESS = 'Success' SUMMARY_TITLE = 'Summary' SUMMARY = 'summary' TITLE = 'title' TERM = 'term' TERMS = 'terms' TOTAL = 'total' TYPES = 'types' TYPE_KEY = 'type' WILDCARD = '*' X = 'x' Y = 'y' YES = 'yes'
21.102273
51
0.706516
ADVANCED_QUERY_KEY = 'advancedQuery' ALL = 'all' AND = 'AND' AND_JOIN = ' AND ' AND_NOT_JOIN = ' AND NOT ' APPENDED = 'appended' APPLICATION_JSON = 'application/json' ASC = 'asc' AT_ID = '@id' AT_CONTEXT = '@context' AT_TYPE = '@type' AUDIT = 'audit' AUDIT_TITLE = 'Audit' BOOL = 'bool' BOOST_VALUES = 'boost_values' BUCKETS = 'buckets' CLEAR_FILTERS = 'clear_filters' COLLECTION_NAME = 'item_type' COLUMNS = 'columns' DASH = '-' DEBUG_KEY = 'debug' DESC = 'desc' DOC_COUNT = 'doc_count' EMBEDDED = 'embedded.' EMBEDDED_FRAME = 'embedded' EMBEDDED_TYPE = 'embedded.@type' EXCLUDE = 'exclude' EXISTS = 'exists' FACETS = 'facets' FIELD_KEY = 'field' FILTERS = 'filters' FRAME_KEY = 'frame' FROM_KEY = 'from' GRAPH = '@graph' GROUP_BY = 'group_by' GROUP_SUBMITTER = 'group.submitter' HITS = 'hits' ITEM = 'Item' JS_IS_EQUAL = 'isEqual' JS_TRUE = 'true' JS_FALSE = 'false' JSONLD_CONTEXT = 'jsonld_context' KEY = 'key' LIMIT_KEY = 'limit' LENGTH = 'length' LONG = 'long' MATRIX = 'matrix' MATRIX_TITLE = 'Matrix' MODE_KEY = 'mode' NO = 'no' NO_LIMIT = 999999 NO_RESULTS_FOUND = 'No results found' NON_SORTABLE = 'non_sortable' NOT_JOIN = ' NOT ' NOT_FLAG = '!' NOTIFICATION = 'notification' OBJECT_FRAME = 'object' ORDER = 'order' PERIOD = '.' PICKER = 'picker' PRINCIPALS_ALLOWED_VIEW = 'principals_allowed.view' PROPERTIES = 'properties' QUERY_STRING = 'query_string' RAW_QUERY = 'raw_query' REMOVE = 'remove' REPORT_TITLE = 'Report' SEARCH_AUDIT = 'search_audit' SEARCH_BASE = 'search_base' SEARCH_PATH = '/search/' SEARCH_TITLE = 'Search' SEARCH_TERM_KEY = 'searchTerm' SIMPLE_QUERY_STRING = 'simple_query_string' SORT_KEY = 'sort' _SOURCE = '_source' SUCCESS = 'Success' SUMMARY_TITLE = 'Summary' SUMMARY = 'summary' TITLE = 'title' TERM = 'term' TERMS = 'terms' TOTAL = 'total' TYPES = 'types' TYPE_KEY = 'type' WILDCARD = '*' X = 'x' Y = 'y' YES = 'yes'
0
0
0
bbed43c689132df7b14e284f2ed94a474f88d84a
399
py
Python
tests/conftest.py
dajor/youtube2
390a701c0de3cb7dd6240a775b980edda150c376
[ "MIT" ]
null
null
null
tests/conftest.py
dajor/youtube2
390a701c0de3cb7dd6240a775b980edda150c376
[ "MIT" ]
null
null
null
tests/conftest.py
dajor/youtube2
390a701c0de3cb7dd6240a775b980edda150c376
[ "MIT" ]
null
null
null
import pytest from app.app import create_app @pytest.fixture() @pytest.fixture() @pytest.fixture()
13.3
37
0.606516
import pytest from app.app import create_app @pytest.fixture() def app(): app = create_app() app.config.update( { "TESTING": True, } ) # other setup can go here yield app # clean up / reset resources here @pytest.fixture() def client(app): return app.test_client() @pytest.fixture() def runner(app): return app.test_cli_runner()
227
0
66
6c171d09be3d70b2660fd3b47e32ad03e08cc1fc
29,122
py
Python
src/charm.py
stgraber/charm-lxd
48c4b56a50902f9dcaebe6d3f80911927bab09ab
[ "Apache-2.0" ]
null
null
null
src/charm.py
stgraber/charm-lxd
48c4b56a50902f9dcaebe6d3f80911927bab09ab
[ "Apache-2.0" ]
null
null
null
src/charm.py
stgraber/charm-lxd
48c4b56a50902f9dcaebe6d3f80911927bab09ab
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python3 """LXD charm.""" import logging import os import shutil import subprocess import tarfile import tempfile from urllib.error import HTTPError, URLError from urllib.request import urlopen from ops.charm import ( ActionEvent, CharmBase, ConfigChangedEvent, InstallEvent, StartEvent, ) from ops.framework import StoredState from ops.main import main from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, ModelError logger = logging.getLogger(__name__) SYSCTL_CONFIGS = { "fs.aio-max-nr": 524288, "fs.inotify.max_queued_events": 1048576, "fs.inotify.max_user_instances": 1048576, "fs.inotify.max_user_watches": 1048576, "kernel.dmesg_restrict": 1, "kernel.keys.maxbytes": 2000000, "kernel.keys.maxkeys": 2000, "net.core.bpf_jit_limit": 3000000000, "net.ipv4.neigh.default.gc_thresh3": 8192, "net.ipv6.neigh.default.gc_thresh3": 8192, "vm.max_map_count": 262144, } SYSTEMD_TMPFILES_CONFIGS = [ "z /proc/sched_debug 0400 - - -", "z /sys/kernel/slab 0700 - - -", ] REBOOT_REQUIRED_FILE = "/run/lxd-reboot-required" class LxdCharm(CharmBase): """LXD charm class.""" _stored = StoredState() def __init__(self, *args): """Initialize charm's variables.""" super().__init__(*args) # Initialize the persistent storage if needed self._stored.set_default( addresses={}, config={}, lxd_binary_path=None, lxd_initialized=False, lxd_installed=False, lxd_snap_path=None, reboot_required=False, ) # Action event handlers self.framework.observe( self.on.add_trusted_client_action, self._on_action_add_trusted_client ) self.framework.observe(self.on.debug_action, self._on_action_debug) self.framework.observe( self.on.show_pending_config_action, self._on_action_show_pending_config ) # Main event handlers self.framework.observe(self.on.install, self._on_charm_install) self.framework.observe(self.on.config_changed, self._on_charm_config_changed) self.framework.observe(self.on.start, self._on_charm_start) self.framework.observe(self.on.upgrade_charm, self._on_charm_upgrade) def 
_on_action_add_trusted_client(self, event: ActionEvent) -> None: """Add a client certificate to the trusted list.""" name = event.params.get("name", "unknown") cert = event.params.get("cert") cert_url = event.params.get("cert-url") projects = event.params.get("projects") if not cert and not cert_url: msg = "One of cert or cert-url parameter needs to be provided." event.fail(msg) logger.error(msg) return if cert: # The received PEM needs to be mangled to be able to split() # on spaces without breaking the "-----BEGIN CERTIFICATE-----" # and "-----END CERTIFICATE-----" lines cert = ( "\n".join(cert.replace(" CERTIFICATE", "CERTIFICATE", 2).split()) .replace("CERTIFICATE", " CERTIFICATE", 2) .encode() ) # Ignore the cert-url param if a cert was provided cert_url = None if cert_url and not (cert_url.startswith("http://") or cert_url.startswith("https://")): msg = 'The cert-url parameter needs to start with "http://" or "https://".' event.fail(msg) logger.error(msg) return if cert_url: try: response = urlopen(cert_url) except HTTPError as e: msg = f"The server couldn't fulfill the request. Error code: {e.code}" event.fail(msg) logger.error(msg) return except URLError as e: msg = f"We failed to reach a server. Reason: {e.reason}" event.fail(msg) logger.error(msg) return else: cert = response.read() if not cert: msg = "Invalid/empty certificate provided/retrieved." 
event.fail(msg) logger.error(msg) return cmd = ["lxc", "config", "trust", "add", "-", "--name", name] if projects: cmd += ["--restricted", "--projects", projects] try: subprocess.run(cmd, input=cert, check=True) except subprocess.CalledProcessError as e: msg = f'Failed to run "{e.cmd}": {e.returncode}' event.fail(msg) logger.error(msg) raise RuntimeError event.set_results({"result": "the client certificate is now trusted"}) def _on_action_debug(self, event: ActionEvent) -> None: """Collect information for a bug report.""" try: b = subprocess.run(["lxd.buginfo"], check=True) except subprocess.CalledProcessError as e: msg = f'Failed to run "{e.cmd}": {e.returncode}' event.fail(msg) logger.error(msg) raise RuntimeError event.set_results({"buginfo": b.stdout}) logger.debug("lxd.buginfo called successfully") def _on_action_show_pending_config(self, event: ActionEvent) -> None: """Show the currently pending configuration changes (queued for after the reboot).""" event.set_results({"pending": self.config_changed()}) def _on_charm_config_changed(self, event: ConfigChangedEvent) -> None: """React to configuration changes. Some configuration items can be set only once while others are changable, sometimes requiring a service reload or even a machine reboot. 
""" logger.info("Updating charm config") # Confirm that the config is valid if not self.config_is_valid(): return # Get all the configs that changed changed = self.config_changed() if not changed: logger.debug("No configuration changes to apply") return # Check if the required reboot occurred and clear the flag if yes if os.path.exists(REBOOT_REQUIRED_FILE): self.unit_blocked("Reboot required, deferring config change") event.defer() return # Check if any required reboot was done self.system_clear_reboot_required() # Apply all the configs that changed try: if "snap-channel" in changed: self.snap_install_lxd() elif "sysctl-tuning" in changed: self.kernel_sysctl() elif "kernel-hardening" in changed: self.kernel_hardening() elif [k for k in changed if k.startswith("snap-config-")]: self.snap_config_set() except RuntimeError: msg = "Failed to apply some configuration change(s): %s" % ", ".join(changed) self.unit_blocked(msg) event.defer() return # If some changes needed a reboot to take effect, enter blocked status if self._stored.reboot_required: self.unit_blocked("Machine reboot required") return # All done self.unit_active("Configuration change(s) applied successfully") def config_changed(self) -> dict: """Figure out what changed.""" new_config = self.config old_config = self._stored.config apply_config = {} for k, v in new_config.items(): if k not in old_config: apply_config[k] = v elif v != old_config[k]: apply_config[k] = v return apply_config def config_is_valid(self) -> bool: """Validate the config.""" if "local" in self.model.storages and len(self.model.storages["local"]) > 1: self.unit_blocked("LXD charm only supports a single storage volume") return False config_changed = self.config_changed() # If nothing changed and we were blocked due to a lxd- key # change (post-init), we can assume the change was reverted thus unblocking us if ( not config_changed and isinstance(self.unit.status, BlockedStatus) and "Can't modify lxd- keys after initialization:" in 
str(self.unit.status) ): self.unit_active("Unblocking as the lxd- keys were reset to their initial values") for k in config_changed: if k.startswith("lxd-") and self._stored.lxd_initialized: self.unit_blocked(f"Can't modify lxd- keys after initialization: {k}") return False return True def juju_set_proxy(self) -> None: """Apply proxy config.""" juju_proxy = "/etc/juju-proxy.conf" if not os.path.exists(juju_proxy): logger.debug("No proxy config from Juju.") return http_proxy = None https_proxy = None no_proxy = None with open(juju_proxy, encoding="UTF-8") as f: for line in f.read().splitlines(): # Only consider lines exporting variables if not line.startswith("export "): continue # Parse export lines try: # Strip "export " prefix and split variable/value k, v = line.replace("export ", "", 1).split("=", 1) except (IndexError, ValueError): continue if k == "HTTP_PROXY": http_proxy = v elif k == "HTTPS_PROXY": https_proxy = v elif k == "NO_PROXY": no_proxy = v try: if http_proxy: logger.debug(f"Configuring core.proxy_http={http_proxy}") subprocess.run(["lxc", "config", "set", "core.proxy_http", http_proxy], check=True) if https_proxy: logger.debug(f"Configuring core.proxy_https={https_proxy}") subprocess.run( ["lxc", "config", "set", "core.proxy_https", https_proxy], check=True, ) if no_proxy: logger.debug(f"Configuring core.proxy_ignore_hosts={no_proxy}") subprocess.run( ["lxc", "config", "set", "core.proxy_ignore_hosts", no_proxy], check=True, ) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def kernel_sysctl(self) -> None: """Apply sysctl tuning keys.""" logger.debug("Applying sysctl tuning") sysctl_file = "/etc/sysctl.d/60-lxd.conf" config = self.config["sysctl-tuning"] if config: self.unit_maintenance(f"Applying sysctl config file: {sysctl_file}") with open(sysctl_file, "w", encoding="UTF-8") as f: for k, v in SYSCTL_CONFIGS.items(): f.write(f"{k} = {v}\n") try: subprocess.run(["sysctl", 
"--quiet", "--load", sysctl_file], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError elif os.path.exists(sysctl_file): self.unit_maintenance(f"Removing sysctl config file: {sysctl_file}") os.remove(sysctl_file) # Persist the configuration self._stored.config["sysctl-tuning"] = config def kernel_hardening(self) -> None: """Apply kernel hardening systemd tmpfiles.""" logger.debug("Applying kernel hardening") systemd_tmpfiles = "/etc/tmpfiles.d/lxd.conf" config = self.config["kernel-hardening"] if config: self.unit_maintenance(f"Applying kernel hardening config file: {systemd_tmpfiles}") with open(systemd_tmpfiles, "w", encoding="UTF-8") as f: f.write("\n".join(SYSTEMD_TMPFILES_CONFIGS) + "\n") try: subprocess.run(["systemd-tmpfiles", "--create"], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError elif os.path.exists(systemd_tmpfiles): self.unit_maintenance(f"Removing kernel hardening config file: {systemd_tmpfiles}") os.remove(systemd_tmpfiles) # Persist the configuration self._stored.config["kernel-hardening"] = config def lxd_init(self) -> None: """Apply initial configuration of LXD.""" self.unit_maintenance("Initializing LXD in standalone mode") preseed = self.config.get("lxd-preseed") if preseed: self.unit_maintenance("Applying LXD preseed") try: # NOTE: When preseeding, no further configuration is applied. 
subprocess.run(["lxd", "init", "--preseed"], check=True, input=preseed.encode()) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError else: self.unit_maintenance("Performing initial configuration") try: # Configure the storage if "local" in self.model.storages and len(self.model.storages["local"]) == 1: src = f"source={self.model.storages['local'][0].location}" self.unit_maintenance(f"Configuring external storage pool (zfs, {src})") subprocess.run(["lxc", "storage", "create", "local", "zfs", src], check=True) else: self.unit_maintenance("Configuring local storage pool (dir)") subprocess.run(["lxc", "storage", "create", "local", "dir"], check=True) subprocess.run( [ "lxc", "profile", "device", "add", "default", "root", "disk", "pool=local", "path=/", ], check=True, ) # Configure the network self.unit_maintenance("Configuring network bridge (lxdbr0)") subprocess.run(["lxc", "network", "create", "lxdbr0"], check=True) subprocess.run( [ "lxc", "profile", "device", "add", "default", "eth0", "nic", "network=lxdbr0", "name=eth0", ], check=True, ) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # Initial configuration of core.proxy_* keys self.juju_set_proxy() # Done with the initialization self._stored.config["lxd-preseed"] = preseed # Flag any `lxd-*` keys not handled, there should be none for k in self.config_changed(): if k.startswith("lxd-"): logger.error(f"lxd_init did not handle the key config named: {k}") def lxd_is_active(self) -> bool: """Indicate if the lxd daemon is active.""" c = subprocess.run( ["systemctl", "is-active", "--quiet", "snap.lxd.daemon.service"], check=False, ) return c.returncode == 0 def lxd_reload(self) -> None: """Reload the lxd daemon.""" self.unit_maintenance("Reloading LXD") try: # Avoid occasional race during startup where a reload could cause a failure subprocess.run(["lxd", "waitready", 
"--timeout=30"], check=False) # Start a monitor process and wait for it to exit due to the service # reloading and the old lxd process closing the monitor's socket. mon = subprocess.Popen( ["lxc", "monitor", "--type=nonexistent"], stderr=subprocess.DEVNULL ) subprocess.run(["systemctl", "reload", "snap.lxd.daemon.service"], check=True) mon.wait(timeout=600) except subprocess.TimeoutExpired: if not mon.returncode: mon.kill() self.unit_maintenance("Timeout while reloading the LXD service") raise RuntimeError except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def resource_sideload(self) -> None: """Sideload resources.""" # Multi-arch support arch = os.uname().machine if arch == "x86_64": possible_archs = ["x86_64", "amd64"] else: possible_archs = [arch] # LXD snap lxd_snap_resource = None fname_suffix = ".snap" try: # Note: self._stored can only store simple data types (int/float/dict/list/etc) lxd_snap_resource = str(self.model.resources.fetch("lxd-snap")) except ModelError: pass tmp_dir = None if lxd_snap_resource and tarfile.is_tarfile(lxd_snap_resource): logger.debug(f"{lxd_snap_resource} is a tarball; unpacking") tmp_dir = tempfile.mkdtemp() tarball = tarfile.open(lxd_snap_resource) valid_names = {f"lxd_{x}{fname_suffix}" for x in possible_archs} for f in valid_names.intersection(tarball.getnames()): tarball.extract(f, path=tmp_dir) logger.debug(f"{f} was extracted from the tarball") self._stored.lxd_snap_path = f"{tmp_dir}/{f}" break else: logger.debug("Missing arch specific snap from tarball.") else: self._stored.lxd_snap_path = lxd_snap_resource if self._stored.lxd_snap_path: self.snap_sideload_lxd() if tmp_dir: os.remove(self._stored.lxd_snap_path) os.rmdir(tmp_dir) # LXD binary lxd_binary_resource = None fname_suffix = "" try: # Note: self._stored can only store simple data types (int/float/dict/list/etc) lxd_binary_resource = str(self.model.resources.fetch("lxd-binary")) except 
ModelError: pass tmp_dir = None if lxd_binary_resource and tarfile.is_tarfile(lxd_binary_resource): logger.debug(f"{lxd_binary_resource} is a tarball; unpacking") tmp_dir = tempfile.mkdtemp() tarball = tarfile.open(lxd_binary_resource) valid_names = {f"lxd_{x}{fname_suffix}" for x in possible_archs} for f in valid_names.intersection(tarball.getnames()): tarball.extract(f, path=tmp_dir) logger.debug(f"{f} was extracted from the tarball") self._stored.lxd_binary_path = f"{tmp_dir}/{f}" break else: logger.debug("Missing arch specific binary from tarball.") else: self._stored.lxd_binary_path = lxd_binary_resource if self._stored.lxd_binary_path: self.snap_sideload_lxd_binary() if tmp_dir: os.remove(self._stored.lxd_binary_path) os.rmdir(tmp_dir) def snap_config_set(self) -> None: """Apply snap set to LXD.""" logger.debug("Applying snap set lxd") # Get all the `snap-config-*` configs to set snap_set = {k: v for k, v in self.config_changed().items() if k.startswith("snap-config-")} if not snap_set: return # Apply the changes snap_set_list = [] reboot_needed = False for k, v in snap_set.items(): # Convert Juju config to "snap set" syntax if v is None: snap_value = "" elif isinstance(v, bool): snap_value = str(v).lower() else: snap_value = v snap_key = k.replace("snap-config-", "", 1).replace("-", ".") # Set the snap config snap_set_list.append("%s=%s" % (snap_key, snap_value)) # Keys that require a reboot if k.startswith("snap-config-lxcfs-"): # `snap-config-lxcfs-*` cannot be applied live reboot_needed = True continue self.unit_maintenance("Setting snap configuration(s): " + ", ".join(snap_set_list)) try: subprocess.run(["snap", "set", "lxd"] + snap_set_list, check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # If "snap set lxd" was successful: save all the k/v applied for k, v in snap_set.items(): self._stored.config[k] = v if not self.lxd_is_active(): logger.debug("Skipping LXD reload 
(service inactive)") return self.lxd_reload() if reboot_needed: self.system_set_reboot_required() def snap_install_lxd(self) -> None: """Install LXD from snap.""" channel = self.config["snap-channel"] if channel: channel_name = channel else: channel_name = "latest/stable" self.unit_maintenance(f"Installing LXD snap (channel={channel_name})") try: subprocess.run(["snap", "install", "lxd", f"--channel={channel}"], check=True) subprocess.run(["snap", "refresh", "lxd", f"--channel={channel}"], check=True) if os.path.exists("/var/lib/lxd"): subprocess.run(["lxd.migrate", "-yes"], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # Done with the snap installation self._stored.config["snap-channel"] = channel def snap_sideload_lxd(self) -> None: """Sideload LXD snap resource.""" logger.debug("Applying LXD snap sideload changes") # A 0 byte file will unload the resource if os.path.getsize(self._stored.lxd_snap_path) == 0: logger.debug("Reverting to LXD snap from snapstore") channel = self._stored.config["snap-channel"] cmd = ["snap", "refresh", "lxd", f"--channel={channel}", "--amend"] alias = None enable = None else: logger.debug("Sideloading LXD snap") cmd = ["snap", "install", "--dangerous", self._stored.lxd_snap_path] # Since the sideloaded snap doesn't have an assertion, some things need # to be done manually alias = ["snap", "alias", "lxd.lxc", "lxc"] enable = ["systemctl", "enable", "--now", "snap.lxd.daemon.unix.socket"] try: subprocess.run(cmd, check=True) if alias: subprocess.run(alias, check=True) if enable: subprocess.run(enable, check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def snap_sideload_lxd_binary(self) -> None: """Sideload LXD binary resource.""" logger.debug("Applying LXD binary sideload changes") lxd_debug = "/var/snap/lxd/common/lxd.debug" # A 0 byte file will unload the 
resource if os.path.getsize(self._stored.lxd_binary_path) == 0: logger.debug("Unloading sideloaded LXD binary") if os.path.exists(lxd_debug): os.remove(lxd_debug) else: logger.debug("Sideloading LXD binary") # Avoid "Text file busy" error if os.path.exists(lxd_debug): logger.debug("Removing old sideloaded LXD binary") os.remove(lxd_debug) shutil.copyfile(self._stored.lxd_binary_path, lxd_debug) os.chmod(lxd_debug, 0o755) self.lxd_reload() def system_clear_reboot_required(self) -> None: """Clear the reboot_required flag if a reboot occurred.""" # If the required reboot occurred so let's clear the flag if self._stored.reboot_required and not os.path.exists(REBOOT_REQUIRED_FILE): self._stored.reboot_required = False logger.debug("Required reboot done") def system_set_reboot_required(self) -> None: """Indicate that a reboot is required to reach a clean state.""" # Touch a flag file indicating that a reboot is required. try: open(REBOOT_REQUIRED_FILE, "a").close() self._stored.reboot_required = True except OSError: logger.warning(f"Failed to create: {REBOOT_REQUIRED_FILE}") def unit_active(self, msg: str = "") -> None: """Set the unit's status to active and log the provided message, if any.""" self.unit.status = ActiveStatus() if msg: logger.debug(msg) def unit_blocked(self, msg: str) -> None: """Set the unit's status to blocked and log the provided message.""" self.unit.status = BlockedStatus(msg) logger.error(msg) def unit_maintenance(self, msg: str) -> None: """Set the unit's status to maintenance and log the provided message.""" self.unit.status = MaintenanceStatus(msg) logger.info(msg) if __name__ == "__main__": main(LxdCharm)
37.576774
99
0.571836
#!/usr/bin/env python3 """LXD charm.""" import logging import os import shutil import subprocess import tarfile import tempfile from urllib.error import HTTPError, URLError from urllib.request import urlopen from ops.charm import ( ActionEvent, CharmBase, ConfigChangedEvent, InstallEvent, StartEvent, ) from ops.framework import StoredState from ops.main import main from ops.model import ActiveStatus, BlockedStatus, MaintenanceStatus, ModelError logger = logging.getLogger(__name__) SYSCTL_CONFIGS = { "fs.aio-max-nr": 524288, "fs.inotify.max_queued_events": 1048576, "fs.inotify.max_user_instances": 1048576, "fs.inotify.max_user_watches": 1048576, "kernel.dmesg_restrict": 1, "kernel.keys.maxbytes": 2000000, "kernel.keys.maxkeys": 2000, "net.core.bpf_jit_limit": 3000000000, "net.ipv4.neigh.default.gc_thresh3": 8192, "net.ipv6.neigh.default.gc_thresh3": 8192, "vm.max_map_count": 262144, } SYSTEMD_TMPFILES_CONFIGS = [ "z /proc/sched_debug 0400 - - -", "z /sys/kernel/slab 0700 - - -", ] REBOOT_REQUIRED_FILE = "/run/lxd-reboot-required" class LxdCharm(CharmBase): """LXD charm class.""" _stored = StoredState() def __init__(self, *args): """Initialize charm's variables.""" super().__init__(*args) # Initialize the persistent storage if needed self._stored.set_default( addresses={}, config={}, lxd_binary_path=None, lxd_initialized=False, lxd_installed=False, lxd_snap_path=None, reboot_required=False, ) # Action event handlers self.framework.observe( self.on.add_trusted_client_action, self._on_action_add_trusted_client ) self.framework.observe(self.on.debug_action, self._on_action_debug) self.framework.observe( self.on.show_pending_config_action, self._on_action_show_pending_config ) # Main event handlers self.framework.observe(self.on.install, self._on_charm_install) self.framework.observe(self.on.config_changed, self._on_charm_config_changed) self.framework.observe(self.on.start, self._on_charm_start) self.framework.observe(self.on.upgrade_charm, self._on_charm_upgrade) def 
_on_action_add_trusted_client(self, event: ActionEvent) -> None: """Add a client certificate to the trusted list.""" name = event.params.get("name", "unknown") cert = event.params.get("cert") cert_url = event.params.get("cert-url") projects = event.params.get("projects") if not cert and not cert_url: msg = "One of cert or cert-url parameter needs to be provided." event.fail(msg) logger.error(msg) return if cert: # The received PEM needs to be mangled to be able to split() # on spaces without breaking the "-----BEGIN CERTIFICATE-----" # and "-----END CERTIFICATE-----" lines cert = ( "\n".join(cert.replace(" CERTIFICATE", "CERTIFICATE", 2).split()) .replace("CERTIFICATE", " CERTIFICATE", 2) .encode() ) # Ignore the cert-url param if a cert was provided cert_url = None if cert_url and not (cert_url.startswith("http://") or cert_url.startswith("https://")): msg = 'The cert-url parameter needs to start with "http://" or "https://".' event.fail(msg) logger.error(msg) return if cert_url: try: response = urlopen(cert_url) except HTTPError as e: msg = f"The server couldn't fulfill the request. Error code: {e.code}" event.fail(msg) logger.error(msg) return except URLError as e: msg = f"We failed to reach a server. Reason: {e.reason}" event.fail(msg) logger.error(msg) return else: cert = response.read() if not cert: msg = "Invalid/empty certificate provided/retrieved." 
event.fail(msg) logger.error(msg) return cmd = ["lxc", "config", "trust", "add", "-", "--name", name] if projects: cmd += ["--restricted", "--projects", projects] try: subprocess.run(cmd, input=cert, check=True) except subprocess.CalledProcessError as e: msg = f'Failed to run "{e.cmd}": {e.returncode}' event.fail(msg) logger.error(msg) raise RuntimeError event.set_results({"result": "the client certificate is now trusted"}) def _on_action_debug(self, event: ActionEvent) -> None: """Collect information for a bug report.""" try: b = subprocess.run(["lxd.buginfo"], check=True) except subprocess.CalledProcessError as e: msg = f'Failed to run "{e.cmd}": {e.returncode}' event.fail(msg) logger.error(msg) raise RuntimeError event.set_results({"buginfo": b.stdout}) logger.debug("lxd.buginfo called successfully") def _on_action_show_pending_config(self, event: ActionEvent) -> None: """Show the currently pending configuration changes (queued for after the reboot).""" event.set_results({"pending": self.config_changed()}) def _on_charm_install(self, event: InstallEvent) -> None: logger.info("Installing the LXD charm") # Confirm that the config is valid if not self.config_is_valid(): return # Install LXD itself try: self.snap_install_lxd() self._stored.lxd_installed = True logger.info("LXD installed successfully") except RuntimeError: logger.error("Failed to install LXD") event.defer() return # Apply various configs self.snap_config_set() self.kernel_sysctl() self.kernel_hardening() # Initial configuration try: self.lxd_init() self._stored.lxd_initialized = True logger.info("LXD initialized successfully") except RuntimeError: logger.error("Failed to initialize LXD") event.defer() return # Apply sideloaded resources attached at deploy time self.resource_sideload() # All done self.unit_active() def _on_charm_config_changed(self, event: ConfigChangedEvent) -> None: """React to configuration changes. 
Some configuration items can be set only once while others are changable, sometimes requiring a service reload or even a machine reboot. """ logger.info("Updating charm config") # Confirm that the config is valid if not self.config_is_valid(): return # Get all the configs that changed changed = self.config_changed() if not changed: logger.debug("No configuration changes to apply") return # Check if the required reboot occurred and clear the flag if yes if os.path.exists(REBOOT_REQUIRED_FILE): self.unit_blocked("Reboot required, deferring config change") event.defer() return # Check if any required reboot was done self.system_clear_reboot_required() # Apply all the configs that changed try: if "snap-channel" in changed: self.snap_install_lxd() elif "sysctl-tuning" in changed: self.kernel_sysctl() elif "kernel-hardening" in changed: self.kernel_hardening() elif [k for k in changed if k.startswith("snap-config-")]: self.snap_config_set() except RuntimeError: msg = "Failed to apply some configuration change(s): %s" % ", ".join(changed) self.unit_blocked(msg) event.defer() return # If some changes needed a reboot to take effect, enter blocked status if self._stored.reboot_required: self.unit_blocked("Machine reboot required") return # All done self.unit_active("Configuration change(s) applied successfully") def _on_charm_start(self, event: StartEvent) -> None: logger.info("Starting the LXD charm") if not self._stored.lxd_initialized: logger.debug("LXD is not initialized yet, not starting the charm") return # Check if any required reboot was done self.system_clear_reboot_required() if not self._stored.reboot_required and isinstance(self.unit.status, BlockedStatus): self.unit_active("Pending configuration changes were applied during the last reboot") # Apply pending config changes (those were likely queued up while the unit was # down/rebooting) if self.config_changed(): logger.debug("Pending config changes detected") self._on_charm_config_changed(event) def 
_on_charm_upgrade(self, _): logger.info("Upgrading the LXD charm") # Nothing to do if LXD is not initialized yet if not self._stored.lxd_initialized: logger.info("Nothing to upgrade as LXD is not initialized yet") return # If LXD was initialized and new `lxd-*` keys are introduced on # upgrades, those will need to be taken as-is (with a warning) because # they would otherwise cause failures during the config-changed event # following the upgrade-charm event. changed = self.config_changed() for k, v in changed.items(): if k.startswith("lxd-"): logger.warning( f'The new "{k}" key won\'t be applied to existing units ' "as their LXD is already initialized" ) self._stored.config[k] = v # Apply sideloaded resources attached after deployment self.resource_sideload() def config_changed(self) -> dict: """Figure out what changed.""" new_config = self.config old_config = self._stored.config apply_config = {} for k, v in new_config.items(): if k not in old_config: apply_config[k] = v elif v != old_config[k]: apply_config[k] = v return apply_config def config_is_valid(self) -> bool: """Validate the config.""" if "local" in self.model.storages and len(self.model.storages["local"]) > 1: self.unit_blocked("LXD charm only supports a single storage volume") return False config_changed = self.config_changed() # If nothing changed and we were blocked due to a lxd- key # change (post-init), we can assume the change was reverted thus unblocking us if ( not config_changed and isinstance(self.unit.status, BlockedStatus) and "Can't modify lxd- keys after initialization:" in str(self.unit.status) ): self.unit_active("Unblocking as the lxd- keys were reset to their initial values") for k in config_changed: if k.startswith("lxd-") and self._stored.lxd_initialized: self.unit_blocked(f"Can't modify lxd- keys after initialization: {k}") return False return True def juju_set_proxy(self) -> None: """Apply proxy config.""" juju_proxy = "/etc/juju-proxy.conf" if not os.path.exists(juju_proxy): 
logger.debug("No proxy config from Juju.") return http_proxy = None https_proxy = None no_proxy = None with open(juju_proxy, encoding="UTF-8") as f: for line in f.read().splitlines(): # Only consider lines exporting variables if not line.startswith("export "): continue # Parse export lines try: # Strip "export " prefix and split variable/value k, v = line.replace("export ", "", 1).split("=", 1) except (IndexError, ValueError): continue if k == "HTTP_PROXY": http_proxy = v elif k == "HTTPS_PROXY": https_proxy = v elif k == "NO_PROXY": no_proxy = v try: if http_proxy: logger.debug(f"Configuring core.proxy_http={http_proxy}") subprocess.run(["lxc", "config", "set", "core.proxy_http", http_proxy], check=True) if https_proxy: logger.debug(f"Configuring core.proxy_https={https_proxy}") subprocess.run( ["lxc", "config", "set", "core.proxy_https", https_proxy], check=True, ) if no_proxy: logger.debug(f"Configuring core.proxy_ignore_hosts={no_proxy}") subprocess.run( ["lxc", "config", "set", "core.proxy_ignore_hosts", no_proxy], check=True, ) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def kernel_sysctl(self) -> None: """Apply sysctl tuning keys.""" logger.debug("Applying sysctl tuning") sysctl_file = "/etc/sysctl.d/60-lxd.conf" config = self.config["sysctl-tuning"] if config: self.unit_maintenance(f"Applying sysctl config file: {sysctl_file}") with open(sysctl_file, "w", encoding="UTF-8") as f: for k, v in SYSCTL_CONFIGS.items(): f.write(f"{k} = {v}\n") try: subprocess.run(["sysctl", "--quiet", "--load", sysctl_file], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError elif os.path.exists(sysctl_file): self.unit_maintenance(f"Removing sysctl config file: {sysctl_file}") os.remove(sysctl_file) # Persist the configuration self._stored.config["sysctl-tuning"] = config def kernel_hardening(self) -> None: """Apply 
kernel hardening systemd tmpfiles.""" logger.debug("Applying kernel hardening") systemd_tmpfiles = "/etc/tmpfiles.d/lxd.conf" config = self.config["kernel-hardening"] if config: self.unit_maintenance(f"Applying kernel hardening config file: {systemd_tmpfiles}") with open(systemd_tmpfiles, "w", encoding="UTF-8") as f: f.write("\n".join(SYSTEMD_TMPFILES_CONFIGS) + "\n") try: subprocess.run(["systemd-tmpfiles", "--create"], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError elif os.path.exists(systemd_tmpfiles): self.unit_maintenance(f"Removing kernel hardening config file: {systemd_tmpfiles}") os.remove(systemd_tmpfiles) # Persist the configuration self._stored.config["kernel-hardening"] = config def lxd_init(self) -> None: """Apply initial configuration of LXD.""" self.unit_maintenance("Initializing LXD in standalone mode") preseed = self.config.get("lxd-preseed") if preseed: self.unit_maintenance("Applying LXD preseed") try: # NOTE: When preseeding, no further configuration is applied. 
subprocess.run(["lxd", "init", "--preseed"], check=True, input=preseed.encode()) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError else: self.unit_maintenance("Performing initial configuration") try: # Configure the storage if "local" in self.model.storages and len(self.model.storages["local"]) == 1: src = f"source={self.model.storages['local'][0].location}" self.unit_maintenance(f"Configuring external storage pool (zfs, {src})") subprocess.run(["lxc", "storage", "create", "local", "zfs", src], check=True) else: self.unit_maintenance("Configuring local storage pool (dir)") subprocess.run(["lxc", "storage", "create", "local", "dir"], check=True) subprocess.run( [ "lxc", "profile", "device", "add", "default", "root", "disk", "pool=local", "path=/", ], check=True, ) # Configure the network self.unit_maintenance("Configuring network bridge (lxdbr0)") subprocess.run(["lxc", "network", "create", "lxdbr0"], check=True) subprocess.run( [ "lxc", "profile", "device", "add", "default", "eth0", "nic", "network=lxdbr0", "name=eth0", ], check=True, ) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # Initial configuration of core.proxy_* keys self.juju_set_proxy() # Done with the initialization self._stored.config["lxd-preseed"] = preseed # Flag any `lxd-*` keys not handled, there should be none for k in self.config_changed(): if k.startswith("lxd-"): logger.error(f"lxd_init did not handle the key config named: {k}") def lxd_is_active(self) -> bool: """Indicate if the lxd daemon is active.""" c = subprocess.run( ["systemctl", "is-active", "--quiet", "snap.lxd.daemon.service"], check=False, ) return c.returncode == 0 def lxd_reload(self) -> None: """Reload the lxd daemon.""" self.unit_maintenance("Reloading LXD") try: # Avoid occasional race during startup where a reload could cause a failure subprocess.run(["lxd", "waitready", 
"--timeout=30"], check=False) # Start a monitor process and wait for it to exit due to the service # reloading and the old lxd process closing the monitor's socket. mon = subprocess.Popen( ["lxc", "monitor", "--type=nonexistent"], stderr=subprocess.DEVNULL ) subprocess.run(["systemctl", "reload", "snap.lxd.daemon.service"], check=True) mon.wait(timeout=600) except subprocess.TimeoutExpired: if not mon.returncode: mon.kill() self.unit_maintenance("Timeout while reloading the LXD service") raise RuntimeError except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def resource_sideload(self) -> None: """Sideload resources.""" # Multi-arch support arch = os.uname().machine if arch == "x86_64": possible_archs = ["x86_64", "amd64"] else: possible_archs = [arch] # LXD snap lxd_snap_resource = None fname_suffix = ".snap" try: # Note: self._stored can only store simple data types (int/float/dict/list/etc) lxd_snap_resource = str(self.model.resources.fetch("lxd-snap")) except ModelError: pass tmp_dir = None if lxd_snap_resource and tarfile.is_tarfile(lxd_snap_resource): logger.debug(f"{lxd_snap_resource} is a tarball; unpacking") tmp_dir = tempfile.mkdtemp() tarball = tarfile.open(lxd_snap_resource) valid_names = {f"lxd_{x}{fname_suffix}" for x in possible_archs} for f in valid_names.intersection(tarball.getnames()): tarball.extract(f, path=tmp_dir) logger.debug(f"{f} was extracted from the tarball") self._stored.lxd_snap_path = f"{tmp_dir}/{f}" break else: logger.debug("Missing arch specific snap from tarball.") else: self._stored.lxd_snap_path = lxd_snap_resource if self._stored.lxd_snap_path: self.snap_sideload_lxd() if tmp_dir: os.remove(self._stored.lxd_snap_path) os.rmdir(tmp_dir) # LXD binary lxd_binary_resource = None fname_suffix = "" try: # Note: self._stored can only store simple data types (int/float/dict/list/etc) lxd_binary_resource = str(self.model.resources.fetch("lxd-binary")) except 
ModelError: pass tmp_dir = None if lxd_binary_resource and tarfile.is_tarfile(lxd_binary_resource): logger.debug(f"{lxd_binary_resource} is a tarball; unpacking") tmp_dir = tempfile.mkdtemp() tarball = tarfile.open(lxd_binary_resource) valid_names = {f"lxd_{x}{fname_suffix}" for x in possible_archs} for f in valid_names.intersection(tarball.getnames()): tarball.extract(f, path=tmp_dir) logger.debug(f"{f} was extracted from the tarball") self._stored.lxd_binary_path = f"{tmp_dir}/{f}" break else: logger.debug("Missing arch specific binary from tarball.") else: self._stored.lxd_binary_path = lxd_binary_resource if self._stored.lxd_binary_path: self.snap_sideload_lxd_binary() if tmp_dir: os.remove(self._stored.lxd_binary_path) os.rmdir(tmp_dir) def snap_config_set(self) -> None: """Apply snap set to LXD.""" logger.debug("Applying snap set lxd") # Get all the `snap-config-*` configs to set snap_set = {k: v for k, v in self.config_changed().items() if k.startswith("snap-config-")} if not snap_set: return # Apply the changes snap_set_list = [] reboot_needed = False for k, v in snap_set.items(): # Convert Juju config to "snap set" syntax if v is None: snap_value = "" elif isinstance(v, bool): snap_value = str(v).lower() else: snap_value = v snap_key = k.replace("snap-config-", "", 1).replace("-", ".") # Set the snap config snap_set_list.append("%s=%s" % (snap_key, snap_value)) # Keys that require a reboot if k.startswith("snap-config-lxcfs-"): # `snap-config-lxcfs-*` cannot be applied live reboot_needed = True continue self.unit_maintenance("Setting snap configuration(s): " + ", ".join(snap_set_list)) try: subprocess.run(["snap", "set", "lxd"] + snap_set_list, check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # If "snap set lxd" was successful: save all the k/v applied for k, v in snap_set.items(): self._stored.config[k] = v if not self.lxd_is_active(): logger.debug("Skipping LXD reload 
(service inactive)") return self.lxd_reload() if reboot_needed: self.system_set_reboot_required() def snap_install_lxd(self) -> None: """Install LXD from snap.""" channel = self.config["snap-channel"] if channel: channel_name = channel else: channel_name = "latest/stable" self.unit_maintenance(f"Installing LXD snap (channel={channel_name})") try: subprocess.run(["snap", "install", "lxd", f"--channel={channel}"], check=True) subprocess.run(["snap", "refresh", "lxd", f"--channel={channel}"], check=True) if os.path.exists("/var/lib/lxd"): subprocess.run(["lxd.migrate", "-yes"], check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError # Done with the snap installation self._stored.config["snap-channel"] = channel def snap_sideload_lxd(self) -> None: """Sideload LXD snap resource.""" logger.debug("Applying LXD snap sideload changes") # A 0 byte file will unload the resource if os.path.getsize(self._stored.lxd_snap_path) == 0: logger.debug("Reverting to LXD snap from snapstore") channel = self._stored.config["snap-channel"] cmd = ["snap", "refresh", "lxd", f"--channel={channel}", "--amend"] alias = None enable = None else: logger.debug("Sideloading LXD snap") cmd = ["snap", "install", "--dangerous", self._stored.lxd_snap_path] # Since the sideloaded snap doesn't have an assertion, some things need # to be done manually alias = ["snap", "alias", "lxd.lxc", "lxc"] enable = ["systemctl", "enable", "--now", "snap.lxd.daemon.unix.socket"] try: subprocess.run(cmd, check=True) if alias: subprocess.run(alias, check=True) if enable: subprocess.run(enable, check=True) except subprocess.CalledProcessError as e: self.unit_blocked(f'Failed to run "{e.cmd}": {e.returncode}') raise RuntimeError def snap_sideload_lxd_binary(self) -> None: """Sideload LXD binary resource.""" logger.debug("Applying LXD binary sideload changes") lxd_debug = "/var/snap/lxd/common/lxd.debug" # A 0 byte file will unload the 
resource if os.path.getsize(self._stored.lxd_binary_path) == 0: logger.debug("Unloading sideloaded LXD binary") if os.path.exists(lxd_debug): os.remove(lxd_debug) else: logger.debug("Sideloading LXD binary") # Avoid "Text file busy" error if os.path.exists(lxd_debug): logger.debug("Removing old sideloaded LXD binary") os.remove(lxd_debug) shutil.copyfile(self._stored.lxd_binary_path, lxd_debug) os.chmod(lxd_debug, 0o755) self.lxd_reload() def system_clear_reboot_required(self) -> None: """Clear the reboot_required flag if a reboot occurred.""" # If the required reboot occurred so let's clear the flag if self._stored.reboot_required and not os.path.exists(REBOOT_REQUIRED_FILE): self._stored.reboot_required = False logger.debug("Required reboot done") def system_set_reboot_required(self) -> None: """Indicate that a reboot is required to reach a clean state.""" # Touch a flag file indicating that a reboot is required. try: open(REBOOT_REQUIRED_FILE, "a").close() self._stored.reboot_required = True except OSError: logger.warning(f"Failed to create: {REBOOT_REQUIRED_FILE}") def unit_active(self, msg: str = "") -> None: """Set the unit's status to active and log the provided message, if any.""" self.unit.status = ActiveStatus() if msg: logger.debug(msg) def unit_blocked(self, msg: str) -> None: """Set the unit's status to blocked and log the provided message.""" self.unit.status = BlockedStatus(msg) logger.error(msg) def unit_maintenance(self, msg: str) -> None: """Set the unit's status to maintenance and log the provided message.""" self.unit.status = MaintenanceStatus(msg) logger.info(msg) if __name__ == "__main__": main(LxdCharm)
2,791
0
81
e78f3c1f96b6fb488ed74ffb65acfec6dce420a4
5,332
py
Python
textube.py
mmjakub/textube
118db7a6734e4484ce9d33916322a912277086ac
[ "BSD-2-Clause" ]
null
null
null
textube.py
mmjakub/textube
118db7a6734e4484ce9d33916322a912277086ac
[ "BSD-2-Clause" ]
null
null
null
textube.py
mmjakub/textube
118db7a6734e4484ce9d33916322a912277086ac
[ "BSD-2-Clause" ]
null
null
null
#!/usr/bin/env python3.9 import base64 import html from http.client import HTTPSConnection import json import threading import urllib.parse if __name__ == "__main__": import argparse import concurrent.futures as cofu import logging import pathlib import sys parser = argparse.ArgumentParser( description='Download subtitles for a youtube channel/playlist') parser.add_argument('-v', '--verbose', action='store_true') parser.add_argument('-q', '--quiet', action='store_true') parser.add_argument('-l', '--list', action='store_true', help='only list target IDs') parser.add_argument('-o', '--output-dir', type=pathlib.Path, default='.', metavar='path', help='output destination, otherwise current directory') parser.add_argument('-c', '--connections', type=int, default=10, metavar='count', help='number of parallel connections to use') parser.add_argument('ids', nargs='+', metavar='ID', help='channel or playlist id') args = parser.parse_args() logger = logging.getLogger('root') logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR if args.quiet else logging.INFO) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) logger.addHandler(ch) logger.debug(f'saving subtitles in: {args.output_dir.resolve()}') logger.debug(f'using {args.connections} connections') logger.debug(f'getting videos from: {" ".join(args.ids)}') ids = (e for plId in args.ids for e in fetch_items(plId, selector=lambda e: e['videoId'])) with cofu.ThreadPoolExecutor(args.connections, initializer=ConnectionManager.threadInit) as executor: future_id = {executor.submit(get_captions_from_config, vid) : vid for vid in ids} for future in cofu.as_completed(future_id): videoId = future_id[future] text = future.result() if text is not None: logger.debug(f'saving subtitles for {videoId}') with open(args.output_dir / videoId, 'w') as f: f.write(text) else: logger.warning(f'no subs for {videoId}')
37.027778
111
0.67123
#!/usr/bin/env python3.9 import base64 import html from http.client import HTTPSConnection import json import threading import urllib.parse class ConnectionManager(): _thread_local = threading.local() _thread_local.gapis = HTTPSConnection('www.googleapis.com') _thread_local.yt = HTTPSConnection('www.youtube-nocookie.com') @classmethod def threadInit(cls): cls._thread_local.gapis = HTTPSConnection('www.googleapis.com') cls._thread_local.yt = HTTPSConnection('www.youtube-nocookie.com') @classmethod def closeAll(cls): cls._thread_local.gapis.close() cls._thread_local.yt.close() @classmethod def yt(cls): return cls._thread_local.yt @classmethod def gapis(cls): return cls._thread_local.gapis def yt_api_get(endpoint, **params): params.update(key='AIzaSyAa8yy0GdcGPHdtD083HiGGx_S0vMPScDM') gapis = ConnectionManager.gapis() gapis.request('GET', f'/youtube/v3/{endpoint}?{"&".join(f"{k}={v}" for k,v in params.items())}', headers={ 'X-Origin': 'https://explorer.apis.google.com', 'X-Referer': 'https://explorer.apis.google.com'}) return json.loads(gapis.getresponse().read()) def itube_api(endpoint, **kwargs): data = {'context': {'client': {'clientName': 'WEB', 'clientVersion': '2.00000101'}}} data.update(kwargs) yt = ConnectionManager.yt() yt.request('POST', f'https://www.youtube-nocookie.com/youtubei/v1/{endpoint}?key=AIzaSyAO_FJ2SlqU8Q4STEHLGCilw_Y9_11qcW8', body=json.dumps(data)) return json.loads(yt.getresponse().read()) def yt_get(url): yt = ConnectionManager.yt() yt.request('GET', url) body = str(yt.getresponse().read()) return html.unescape(html.unescape(body)) def channel2uploads(id): return 'UU' + id[2:] if id.startswith('UC') else id def flatten_cue(cue): cue = cue['transcriptCueGroupRenderer'] return cue['formattedStartOffset']['simpleText'],\ cue['cues'][0]['transcriptCueRenderer']['cue']['simpleText'] def fetch_videoIds(playlistId): return list(fetch_items(playlistId, selector=lambda e: e['videoId'])) def fetch_items(playlistId, part='contentDetails', 
selector=lambda e: e, token=None): params = {} if token is None else {'pageToken': token} data = yt_api_get('playlistItems', part=part, maxResults=50, playlistId=playlistId, **params) yield from (selector(e[part]) for e in data['items']) if (token := data.get('nextPageToken')) is not None: yield from fetch_items(playlistId, part, selector, token) def get_transcript(videoId): params = urllib.parse.quote(base64.b64encode(bytes([10,11]) + videoId.encode())) r_json = itube_api('get_transcript', params=params) try: cues = r_json['actions'][0]['updateEngagementPanelAction']['content']\ ['transcriptRenderer']['body']['transcriptBodyRenderer']['cueGroups'] except (KeyError): return [] return list(map(flatten_cue, cues)) def get_captions_from_config(videoId, fmt=None): params = {} if fmt is None else {'fmt': fmt} config = itube_api('player', videoId=videoId) try: return yt_get(config['captions']['playerCaptionsTracklistRenderer']['captionTracks'][0]['baseUrl']) except KeyError: return None if __name__ == "__main__": import argparse import concurrent.futures as cofu import logging import pathlib import sys parser = argparse.ArgumentParser( description='Download subtitles for a youtube channel/playlist') parser.add_argument('-v', '--verbose', action='store_true') parser.add_argument('-q', '--quiet', action='store_true') parser.add_argument('-l', '--list', action='store_true', help='only list target IDs') parser.add_argument('-o', '--output-dir', type=pathlib.Path, default='.', metavar='path', help='output destination, otherwise current directory') parser.add_argument('-c', '--connections', type=int, default=10, metavar='count', help='number of parallel connections to use') parser.add_argument('ids', nargs='+', metavar='ID', help='channel or playlist id') args = parser.parse_args() logger = logging.getLogger('root') logger.setLevel(logging.DEBUG if args.verbose else logging.ERROR if args.quiet else logging.INFO) ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) 
logger.addHandler(ch) logger.debug(f'saving subtitles in: {args.output_dir.resolve()}') logger.debug(f'using {args.connections} connections') logger.debug(f'getting videos from: {" ".join(args.ids)}') ids = (e for plId in args.ids for e in fetch_items(plId, selector=lambda e: e['videoId'])) with cofu.ThreadPoolExecutor(args.connections, initializer=ConnectionManager.threadInit) as executor: future_id = {executor.submit(get_captions_from_config, vid) : vid for vid in ids} for future in cofu.as_completed(future_id): videoId = future_id[future] text = future.result() if text is not None: logger.debug(f'saving subtitles for {videoId}') with open(args.output_dir / videoId, 'w') as f: f.write(text) else: logger.warning(f'no subs for {videoId}')
2,587
350
234
fd6f0427dc3f1f96809c38df7ca11b783c62bcca
6,777
py
Python
main.py
codewithlennylen/GAS-LAWS
1420d2e68d029d2fc137cc909b5398e67be4f0c6
[ "MIT" ]
null
null
null
main.py
codewithlennylen/GAS-LAWS
1420d2e68d029d2fc137cc909b5398e67be4f0c6
[ "MIT" ]
null
null
null
main.py
codewithlennylen/GAS-LAWS
1420d2e68d029d2fc137cc909b5398e67be4f0c6
[ "MIT" ]
null
null
null
import pygame import random pygame.init() WIDTH, HEIGHT = 960, 700 SCREEN = pygame.display.set_mode((WIDTH, HEIGHT)) WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED = (255, 0, 0) BLUE = (0, 0, 255) CLOCK = pygame.time.Clock() FPS = 30 particles_list = [] container = Container(500, 500) generate_particles(container, 10) main_font = pygame.font.SysFont('comicsans', 50) lost_font = pygame.font.SysFont('comicsans', 60) # Draw text lives_label = main_font.render(f"Which Law ? ", 1, BLUE) level_label = main_font.render(f"Level", 1, BLUE) done = False while not done: for event in pygame.event.get(): if event.type == pygame.QUIT: done = True mouse_pos = pygame.mouse.get_pos() mouse_press = pygame.mouse.get_pressed() keys = pygame.key.get_pressed() SCREEN.fill(WHITE) container.draw() SCREEN.blit(lives_label, (10, 10)) SCREEN.blit(level_label, (WIDTH-level_label.get_width() - 10, 10)) for p in particles_list: p.draw() p.move() p.collide() # p1.draw() # p1.move() # p1.collide() # p2.draw() # p2.move() # p2.collide() # if keys[pygame.K_SPACE]: # p1.move() # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 10, HEIGHT//2 - 125, 300, 250), 5) # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 330, HEIGHT//2 - 125, 300, 250), 5) # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 650, HEIGHT//2 - 125, 300, 250), 5) pygame.display.update() CLOCK.tick(FPS)
35.668421
147
0.614284
import pygame import random pygame.init() WIDTH, HEIGHT = 960, 700 SCREEN = pygame.display.set_mode((WIDTH, HEIGHT)) WHITE = (255, 255, 255) BLACK = (0, 0, 0) RED = (255, 0, 0) BLUE = (0, 0, 255) CLOCK = pygame.time.Clock() FPS = 30 particles_list = [] class Container(object): def __init__(self, width=600, height=600): self.color = BLACK self.width = width self.height = height self.pos_x = ((WIDTH // 2) - (self.width // 2)) self.pos_y = ((HEIGHT // 2) - (self.height // 2)) def draw(self): pygame.draw.rect(SCREEN, self.color, pygame.Rect( self.pos_x, self.pos_y, self.width, self.height), 5) class GasParticle(): def __init__(self, boundary): """Initialize the Object/Instance Args: boundary (List): This is the boundary that defines the container. (Within which the particles are held and collide against) Pos_X, Pos_Y, Width, Height """ self.radius = 10 # Particle Radius self.velocity_range = [8, 9, 10] self.velocity = random.choice(self.velocity_range) # True = Positive && False = Negative self.x_direction = random.choice([True, False]) # True = Positive && False = Negative self.y_direction = random.choice([True, False]) self.color = RED # List of defined Particles for particle-collision self.gas_particles = particles_list self.boundary = boundary self.pos_x = random.randrange( self.boundary[0] + self.radius, (self.boundary[0] + self.boundary[2]) - self.radius) self.pos_y = random.randrange( self.boundary[1] + self.radius, (self.boundary[1] + self.boundary[3]) - self.radius) self.center = [self.pos_x, self.pos_y] def move(self): """Movement of the Particle. 
Rudimentary-Not as random as it should be We are simply changing direction to a predefined direction- PHYSICS-ELEMENT IS MISSING (VECTORS) """ if self.x_direction == True: self.center[0] += self.velocity elif self.x_direction == False: self.center[0] -= self.velocity if self.y_direction == True: self.center[1] += self.velocity elif self.y_direction == False: self.center[1] -= self.velocity def collide_with_container(self): """With the boundaries of the bounding box defined. We can easily create/simulate collisions with the container """ if self.center[0] + self.radius >= self.boundary[2] + self.boundary[0]: self.x_direction = False elif self.center[0] - self.radius < self.boundary[0]: self.x_direction = True if self.center[1] + self.radius >= self.boundary[3] + self.boundary[1]: self.y_direction = False elif self.center[1] - self.radius < self.boundary[1]: self.y_direction = True def collide_with_particle(self): """This handles the collision of this.particle with other particles. BUG : Still not working. All particles assemble to one corner upon collision. 
""" for gas_particle in self.gas_particles: if self.center[0] + self.radius > gas_particle.center[0] - gas_particle.radius: if self.center[1] > gas_particle.center[1] - gas_particle.radius and self.center[1] < gas_particle.center[1] + gas_particle.radius: self.x_direction = False elif self.center[0] - self.radius < gas_particle.center[0] + gas_particle.radius: if self.center[1] > gas_particle.center[1] - gas_particle.radius and self.center[1] < gas_particle.center[1] + gas_particle.radius: self.x_direction = True if self.center[1] + self.radius > gas_particle.center[1] - gas_particle.radius: if self.center[0] > gas_particle.center[0] - gas_particle.radius and self.center[0] < gas_particle.center[0] + gas_particle.radius: self.y_direction = False elif self.center[1] - self.radius < gas_particle.center[1] + gas_particle.radius: if self.center[0] > gas_particle.center[0] - gas_particle.radius and self.center[0] < gas_particle.center[0] + gas_particle.radius: self.y_direction = True # for gas_particle in self.gas_particles: # if self.center[0] + self.radius >= gas_particle.center[0] - gas_particle.radius: # self.x_direction = False # elif self.center[0] - self.radius <= gas_particle.center[0] + gas_particle.radius: # self.x_direction = True # if self.center[1] + self.radius >= gas_particle.center[1] - gas_particle.radius: # self.y_direction = False # elif self.center[1] - self.radius <= gas_particle.center[1] + gas_particle.radius: # self.y_direction = True def collide(self): # self.collide_with_particle() self.collide_with_container() def draw(self): """Draw/render the object onto the pygame.Surface(SCREEN) """ pygame.draw.circle(SCREEN, self.color, self.center, self.radius) def generate_particles(container, n): global particles_list for _ in range(n): p = GasParticle([container.pos_x, container.pos_y, container.width, container.height]) particles_list.append(p) container = Container(500, 500) generate_particles(container, 10) main_font = 
pygame.font.SysFont('comicsans', 50) lost_font = pygame.font.SysFont('comicsans', 60) # Draw text lives_label = main_font.render(f"Which Law ? ", 1, BLUE) level_label = main_font.render(f"Level", 1, BLUE) done = False while not done: for event in pygame.event.get(): if event.type == pygame.QUIT: done = True mouse_pos = pygame.mouse.get_pos() mouse_press = pygame.mouse.get_pressed() keys = pygame.key.get_pressed() SCREEN.fill(WHITE) container.draw() SCREEN.blit(lives_label, (10, 10)) SCREEN.blit(level_label, (WIDTH-level_label.get_width() - 10, 10)) for p in particles_list: p.draw() p.move() p.collide() # p1.draw() # p1.move() # p1.collide() # p2.draw() # p2.move() # p2.collide() # if keys[pygame.K_SPACE]: # p1.move() # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 10, HEIGHT//2 - 125, 300, 250), 5) # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 330, HEIGHT//2 - 125, 300, 250), 5) # pygame.draw.rect(SCREEN, BLACK, pygame.Rect( # 650, HEIGHT//2 - 125, 300, 250), 5) pygame.display.update() CLOCK.tick(FPS)
603
4,514
122
3363492ff5f446b8195aa3dfd8957d55562fe848
4,835
py
Python
submit_scripts_and_plotting/paper_plots/make_interpretation_plots.py
apizzuto/v2_alert_stacking_FRA
6b5bd3f6cd1b5e472797a81a389a30198c64cd9f
[ "MIT" ]
null
null
null
submit_scripts_and_plotting/paper_plots/make_interpretation_plots.py
apizzuto/v2_alert_stacking_FRA
6b5bd3f6cd1b5e472797a81a389a30198c64cd9f
[ "MIT" ]
4
2021-01-08T20:14:23.000Z
2021-03-25T21:05:15.000Z
submit_scripts_and_plotting/paper_plots/make_interpretation_plots.py
apizzuto/v2_alert_stacking_FRA
6b5bd3f6cd1b5e472797a81a389a30198c64cd9f
[ "MIT" ]
null
null
null
'''Script to make all interpretation based plots for the alert followup fast response paper''' import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as st from francis import utils from results_utils import * from francis.universe.universe_plotter import UniversePlotter mpl.style.use(utils.initialize_mpl_style()) base_output = os.path.abspath(utils.get_francis_path() + '../figures/paper_plots/') + '/' palette = sns.light_palette((210, 90, 60), input="husl", n_colors=12) if __name__ == "__main__": unis, binom_scans = get_universe_dict() binomial_scan_plots(unis, binom_scans) binomial_distribution_plots(unis, binom_scans) sensitivity_and_upper_limits(unis, binom_scans)
37.192308
96
0.583247
'''Script to make all interpretation based plots for the alert followup fast response paper''' import os import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import scipy.stats as st from francis import utils from results_utils import * from francis.universe.universe_plotter import UniversePlotter mpl.style.use(utils.initialize_mpl_style()) base_output = os.path.abspath(utils.get_francis_path() + '../figures/paper_plots/') + '/' palette = sns.light_palette((210, 90, 60), input="husl", n_colors=12) def get_universe_dict(): print("Initializing all of the results from three different time scales") unis = dict() binom_scans = dict() for delta_t in [1000., 172800., None]: uni = UniversePlotter(delta_t, 9.6, 'SC', 'MD2014SFR', save=False) pp, tmps = uni.get_overall_background_p(with_tmps=True) truth, pvals, arr = get_true_binomial_p(delta_t=delta_t) ppost = np.count_nonzero(uni.stacked_p <= truth) / float(len(uni.stacked_p)) unis[delta_t] = uni binom_scans[delta_t] = { 'truth': truth, 'pvals': pvals, 'arr': arr, 'tmps': tmps, 'p_post': ppost } if delta_t is None: print('\t- Done with time integrated initialization') else: print(f'\t- Done with time window {delta_t:.1e}') return unis, binom_scans def binomial_scan_plots(unis, binom_scans): print('Making binomial scan plots . . . ') for delta_t in [1000., 172800., None]: uni = unis[delta_t] scan = binom_scans[delta_t] fig, ax = plt.subplots(dpi=200) plt.plot( np.r_[:len(scan['arr'])] + 1, scan['arr'], color=palette[-1], lw=2., zorder=20 ) thresh_ps = st.norm.sf([0., 1., 2., 3.]) percents = np.percentile(uni.stacked_p, thresh_ps * 100.) 
for perc, lab in zip(percents, ['Median', r'$+1\sigma$', r'$+2\sigma$', r'$+3\sigma$']): plt.axhline(perc, ls='-.', color = sns.xkcd_rgb['dodger blue'], lw=0.85, alpha=0.8) plt.text(160, perc*1.1, lab, color = sns.xkcd_rgb['dodger blue'], alpha=0.8) plt.yscale('log') plt.xlabel('Number of sources') plt.ylabel('Binomial p-value') plt.ylim(1.1e-5, 1.2e0) delta_t_str = 'steady' if delta_t is None else f'delta_t_{delta_t:.0e}'.replace('+', '') for ftype in ['.png', '.pdf']: plt.savefig( base_output + f'binomial_scan_{delta_t_str}{ftype}', bbox_inches='tight' ) plt.close() print('\t- Done with binomial scan plots') def binomial_distribution_plots(unis, binom_scans): print('Making binomial distribution plots') for delta_t in [1000., 172800., None]: uni = unis[delta_t] scan = binom_scans[delta_t] uni.plot_background_binomial_p() plt.axvline(scan['truth'], color='k', zorder=20, lw=2.) plt.text(3e-4, 1e2, f"p-val: {scan['p_post']:.3f}") plt.xlim(plt.xlim()[1], plt.xlim()[0]) delta_t_str = 'steady' if delta_t is None else f'delta_t_{delta_t:.0e}'.replace('+', '') for ftype in ['.png', '.pdf']: plt.savefig( base_output + f'binomial_distribution_{delta_t_str}{ftype}', bbox_inches='tight' ) plt.close() print('\t- Done with binomial distribution plots') def sensitivity_and_upper_limits(unis, binom_scans): print('Calculating upper limits and making plots') for delta_t in [1000., 172800., None]: uni = unis[delta_t] scan = binom_scans[delta_t] delta_t_str = 'steady' if delta_t is None else f'delta_t_{delta_t:.0e}'.replace('+', '') uni.brazil_bands(rotated=True) for ftype in ['.png', '.pdf']: plt.savefig( base_output + f'sensitivity_rotated_{delta_t_str}{ftype}', bbox_inches='tight' ) plt.close() for rotated in [True, False]: uni.brazil_bands( rotated=rotated, with_result=True, result=scan['truth'] ) rotated_str = '_rotated' if rotated else '' for ftype in ['.png', '.pdf']: plt.savefig( base_output + f'upper_limit{rotated_str}_{delta_t_str}{ftype}', bbox_inches='tight' ) plt.close() 
print('\t- Done with upper limit plots') if __name__ == "__main__": unis, binom_scans = get_universe_dict() binomial_scan_plots(unis, binom_scans) binomial_distribution_plots(unis, binom_scans) sensitivity_and_upper_limits(unis, binom_scans)
3,990
0
100
b4ffc6d69f9d120135e6f21d3579c97210bec686
957
py
Python
tests/test_igit.py
jmosbacher/igit
62b613fd5bed28b603160dc998c02106ee4fdef0
[ "Apache-2.0" ]
null
null
null
tests/test_igit.py
jmosbacher/igit
62b613fd5bed28b603160dc998c02106ee4fdef0
[ "Apache-2.0" ]
107
2021-06-28T02:10:11.000Z
2022-03-30T02:38:03.000Z
tests/test_igit.py
jmosbacher/igit
62b613fd5bed28b603160dc998c02106ee4fdef0
[ "Apache-2.0" ]
null
null
null
#!/usr/bin/env python """Tests for `igit` package.""" # pylint: disable=redefined-outer-name import random import pytest import igit @pytest.fixture(scope="module")
22.255814
62
0.654127
#!/usr/bin/env python """Tests for `igit` package.""" # pylint: disable=redefined-outer-name import random import pytest import igit @pytest.fixture(scope="module") def memory_repo(): return igit.init("memory://igit_test") def test_interval_tree(): tree = igit.IntervalTree() tree[1, 10] = 9 assert tree[5] == 9 tree[5, 20] = 11 assert tree[15] == 11 assert tree[10] == 11 assert tree[2] == 9 def test_label_tree(): setting2 = igit.LabelTree() setting2["subsetting1"] = 1 assert setting2["subsetting1"] == 1 setting2["subsetting2"] = 9.9 assert setting2["subsetting2"] == 9.9 setting2["subsetting3"] = "text" assert setting2["subsetting3"] == "text" def test_commit(memory_repo): tree = igit.LabelTree() tree['test'] = 'string' memory_repo.add(label_tree=tree) ref = memory_repo.commit(f"commit {random.randint(1,10)}") assert isinstance(ref, igit.models.CommitRef)
693
0
91
79a11c886a6c2abb67ccf6ef4d9891b92e62ae1b
1,191
py
Python
projects/nncrystal/tools/export_dataset.py
nncrystals/detectron2
ea5dcbbb135a62807cacded5a381253d1f605427
[ "Apache-2.0" ]
null
null
null
projects/nncrystal/tools/export_dataset.py
nncrystals/detectron2
ea5dcbbb135a62807cacded5a381253d1f605427
[ "Apache-2.0" ]
null
null
null
projects/nncrystal/tools/export_dataset.py
nncrystals/detectron2
ea5dcbbb135a62807cacded5a381253d1f605427
[ "Apache-2.0" ]
null
null
null
import argparse import json import os from xml.etree import ElementTree from cvat.api import CVATAPI from cvat.argument import cvat_args from cvat.cvat_xml import cvat_xml_to_coco if __name__ == '__main__': parser = argparse.ArgumentParser() cvat_args(parser) parser.add_argument("--job_id", action="append", type=int, required=True) parser.add_argument("--output_dir", default="./datasets") args = parser.parse_args() api = CVATAPI(args.cvat_host) api.login(args.cvat_username, args.cvat_password) for job_id in args.job_id: job = api.get_job(job_id).json() try: task_id = job["task_id"] except: raise RuntimeError(job) data = api.export_data(task_id, format="CVAT XML 1.1 for images").content root = ElementTree.fromstring(data) result = cvat_xml_to_coco(root, ignore_crowded=True, occluded_as_crowded=True, ignore_attributes=True ) with open(os.path.join(args.output_dir, f"export_job_{job_id}.coco.json"), "w") as f: json.dump(result, f)
34.028571
93
0.619647
import argparse import json import os from xml.etree import ElementTree from cvat.api import CVATAPI from cvat.argument import cvat_args from cvat.cvat_xml import cvat_xml_to_coco if __name__ == '__main__': parser = argparse.ArgumentParser() cvat_args(parser) parser.add_argument("--job_id", action="append", type=int, required=True) parser.add_argument("--output_dir", default="./datasets") args = parser.parse_args() api = CVATAPI(args.cvat_host) api.login(args.cvat_username, args.cvat_password) for job_id in args.job_id: job = api.get_job(job_id).json() try: task_id = job["task_id"] except: raise RuntimeError(job) data = api.export_data(task_id, format="CVAT XML 1.1 for images").content root = ElementTree.fromstring(data) result = cvat_xml_to_coco(root, ignore_crowded=True, occluded_as_crowded=True, ignore_attributes=True ) with open(os.path.join(args.output_dir, f"export_job_{job_id}.coco.json"), "w") as f: json.dump(result, f)
0
0
0
5522a16200a742cf8ad364ad9d24d6f9e876d0e7
1,533
py
Python
apps/user/views.py
Agrimonia/Mobileshop
1922572c7652aa85f4676a66fc49e456f560d120
[ "MIT" ]
4
2017-12-17T13:41:31.000Z
2018-09-22T06:17:14.000Z
apps/user/views.py
Agrimonia/Mobileshop
1922572c7652aa85f4676a66fc49e456f560d120
[ "MIT" ]
2
2017-12-16T08:30:13.000Z
2020-01-09T03:21:43.000Z
apps/user/views.py
Agrimonia/Mobileshop
1922572c7652aa85f4676a66fc49e456f560d120
[ "MIT" ]
1
2017-12-04T13:55:12.000Z
2017-12-04T13:55:12.000Z
# from django.shortcuts import render import json from rest_framework import status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.permissions import AllowAny from backend.helpers import InputErrorMessage, JSONResponse from .models import UserSerialiser, User # Create your views here.
36.5
114
0.668624
# from django.shortcuts import render import json from rest_framework import status from rest_framework.views import APIView from rest_framework.response import Response from rest_framework.permissions import AllowAny from backend.helpers import InputErrorMessage, JSONResponse from .models import UserSerialiser, User # Create your views here. class UserShow(APIView): def get(self, request, format=None): serializer = UserSerialiser(request.user) return JSONResponse(serializer.data) class Register(APIView): permission_classes = (AllowAny, ) def post(self, request): try: data = json.loads(request.body.decode('utf-8')) except json.JSONDecodeError: return InputErrorMessage("Invalid JSON body") if "username" not in data: return InputErrorMessage("username not provide.") if User.objects.filter(username=data["username"]).exists(): return InputErrorMessage("username is used.") if "email" not in data: return InputErrorMessage("email not provide.") if User.objects.filter(email=data["email"]).exists(): return InputErrorMessage("email is used.") if "password" not in data: return InputErrorMessage("password not provide.") user = User.objects.create_user(username=data["username"], email=data["email"], password=data["password"]) user.save() return JSONResponse({ "code": 200, "message": "OK", })
1,040
75
71