Dataset schema. Each row pairs a commit with the before/after contents of one Python file, plus derived prompt/response views of the same data. The range column gives the minimum and maximum string length, or the number of distinct values for `stringclasses` columns:

| Column | Type | Range |
|---|---|---|
| commit | stringlengths | 40-40 |
| old_file | stringlengths | 4-118 |
| new_file | stringlengths | 4-118 |
| old_contents | stringlengths | 0-2.94k |
| new_contents | stringlengths | 1-4.43k |
| subject | stringlengths | 15-444 |
| message | stringlengths | 16-3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5-43.2k |
| prompt | stringlengths | 17-4.58k |
| response | stringlengths | 1-4.43k |
| prompt_tagged | stringlengths | 58-4.62k |
| response_tagged | stringlengths | 1-4.43k |
| text | stringlengths | 132-7.29k |
| text_tagged | stringlengths | 173-7.33k |
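To make the schema concrete, here is a minimal sketch of iterating over rows with these fields. The file name `commits.jsonl` and the JSON Lines layout are assumptions for illustration; adapt them to wherever the dataset actually lives.

```python
import json

# Hypothetical local dump of the dataset: one JSON object per line,
# keyed by the column names listed in the table above.
with open("commits.jsonl") as fh:
    for line in fh:
        row = json.loads(line)
        assert len(row["commit"]) == 40   # full SHA-1, per the 40-40 range
        assert row["lang"] == "Python"    # "lang" has a single class
        print(row["subject"], "->", row["new_file"])
```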
---

commit: 479f04ae23227ebb8a3a298d875b73cb1b6de3b6
old_file: ceph_deploy/tests/parser/test_rgw.py
new_file: ceph_deploy/tests/parser/test_rgw.py
old_contents: (empty)

new_contents:

```python
import pytest

from ceph_deploy.cli import get_parser


class TestParserRGW(object):

    def setup(self):
        self.parser = get_parser()

    def test_rgw_help(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('rgw --help'.split())
        out, err = capsys.readouterr()
        assert 'usage: ceph-deploy rgw' in out
        assert 'positional arguments:' in out
        assert 'optional arguments:' in out

    @pytest.mark.skipif(reason="http://tracker.ceph.com/issues/12150")
    def test_rgw_create_host_required(self, capsys):
        with pytest.raises(SystemExit):
            self.parser.parse_args('rgw create'.split())
        out, err = capsys.readouterr()
        assert "error: too few arguments" in err

    def test_rgw_create_one_host(self):
        args = self.parser.parse_args('rgw create host1'.split())
        assert args.rgw[0][0] == 'host1'

    def test_rgw_create_multiple_hosts(self):
        hostnames = ['host1', 'host2', 'host3']
        args = self.parser.parse_args(['rgw', 'create'] + hostnames)
        # args.rgw is a list of tuples, and tuple[0] is the hostname
        hosts = [x[0] for x in args.rgw]
        assert frozenset(hosts) == frozenset(hostnames)
```

subject: Add argparse tests for rgw module

message:
[RM-11742] Add argparse tests for rgw module

Signed-off-by: Travis Rhoden <e5e44d6dbac12e32e01c3bb8b67940d8b42e225b@redhat.com>

lang: Python
license: mit
repos: branto1/ceph-deploy,Vicente-Cheng/ceph-deploy,shenhequnying/ceph-deploy,imzhulei/ceph-deploy,Vicente-Cheng/ceph-deploy,trhoden/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,ceph/ceph-deploy,SUSE/ceph-deploy,zhouyuan/ceph-deploy,imzhulei/ceph-deploy,osynge/ceph-deploy,isyippee/ceph-deploy,ghxandsky/ceph-deploy,shenhequnying/ceph-deploy,ceph/ceph-deploy,codenrhoden/ceph-deploy,isyippee/ceph-deploy,trhoden/ceph-deploy,codenrhoden/ceph-deploy,osynge/ceph-deploy,ghxandsky/ceph-deploy,zhouyuan/ceph-deploy,branto1/ceph-deploy,SUSE/ceph-deploy-to-be-deleted,SUSE/ceph-deploy
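The remaining columns of each row (prompt, response, prompt_tagged, response_tagged, text, text_tagged) repeat the base columns above in derived form. The construction below is inferred from the rows themselves, not from any published spec: `prompt` is `old_contents` followed by `message`, `response` equals `new_contents`, the `*_tagged` variants wrap the same pieces in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers, and `text`/`text_tagged` concatenate the prompt and response.

```python
def build_derived(old_contents, message, new_contents):
    # Inferred reconstruction of the derived columns (not an official spec).
    prompt = old_contents + message
    response = new_contents
    prompt_tagged = ('<commit_before>' + old_contents +
                     '<commit_msg>' + message + '<commit_after>')
    response_tagged = new_contents
    return {
        'prompt': prompt,
        'response': response,
        'prompt_tagged': prompt_tagged,
        'response_tagged': response_tagged,
        'text': prompt + response,
        'text_tagged': prompt_tagged + response_tagged,
    }
```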
---

commit: 56ab983036bcb5c78eee91483c1e610da69216d1
old_file: kubernetes/client/apis/__init__.py
new_file: kubernetes/client/apis/__init__.py
old_contents: (empty)

new_contents:

```python
from __future__ import absolute_import

import warnings

# flake8: noqa

# alias kubernetes.client.api package and print deprecation warning
from kubernetes.client.api import *

warnings.filterwarnings('default', module='kubernetes.client.apis')
warnings.warn(
    "The package kubernetes.client.apis is renamed and deprecated, use kubernetes.client.api instead (please note that the trailing s was removed).",
    DeprecationWarning
)
```

subject: Add kubernetes.client.apis as an alias to kubernetes.client.api

message:
Add kubernetes.client.apis as an alias to kubernetes.client.api

Reference: https://github.com/kubernetes-client/python/issues/974

Signed-off-by: Nabarun Pal <46a782cbd1e9f752958998187886c2b51fda054c@gmail.com>

lang: Python
license: apache-2.0
repos: kubernetes-client/python,kubernetes-client/python
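For context, this is roughly what a caller of the deprecated alias package above would observe. The snippet is an illustrative sketch that assumes a kubernetes client with this alias module installed; because Python caches modules, the warning only fires on the first import in a process.

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    import kubernetes.client.apis  # noqa: F401 -- emits the warning above

# The module-level warnings.warn() call should have been recorded.
assert any(issubclass(w.category, DeprecationWarning) for w in caught)
```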
---

commit: ad2b0447afbee92684ab0b4f14dc0d45a28f3ba2
old_file: tests/foomodulegen-auto.py
new_file: tests/foomodulegen-auto.py

old_contents:

```python
#! /usr/bin/env python

import sys
import re

import pybindgen
from pybindgen.typehandlers import base as typehandlers
from pybindgen import (ReturnValue, Parameter, Module, Function, FileCodeSink)
from pybindgen import (CppMethod, CppConstructor, CppClass, Enum)
from pybindgen.gccxmlparser import ModuleParser
from pybindgen.function import CustomFunctionWrapper
from pybindgen.cppmethod import CustomCppMethodWrapper

import foomodulegen_common


def my_module_gen():
    out = FileCodeSink(sys.stdout)
    pygen_file = open(sys.argv[2], "wt")
    module_parser = ModuleParser('foo2', '::')
    module = module_parser.parse([sys.argv[1]], includes=['"foo.h"'],
                                 pygen_sink=FileCodeSink(pygen_file))
    pygen_file.close()

    foomodulegen_common.customize_module(module)

    module.generate(out)


if __name__ == '__main__':
    try:
        import cProfile as profile
    except ImportError:
        my_module_gen()
    else:
        print >> sys.stderr, "** running under profiler"
        profile.run('my_module_gen()', 'foomodulegen-auto.pstat')
```

new_contents:

```python
#! /usr/bin/env python

import sys
import re

import pybindgen
from pybindgen.typehandlers import base as typehandlers
from pybindgen import (ReturnValue, Parameter, Module, Function, FileCodeSink)
from pybindgen import (CppMethod, CppConstructor, CppClass, Enum)
from pybindgen.gccxmlparser import ModuleParser
from pybindgen.function import CustomFunctionWrapper
from pybindgen.cppmethod import CustomCppMethodWrapper

import foomodulegen_common


def my_module_gen():
    out = FileCodeSink(sys.stdout)
    pygen_file = open(sys.argv[2], "wt")
    module_parser = ModuleParser('foo2', '::')
    module = module_parser.parse([sys.argv[1]], includes=['"foo.h"'],
                                 pygen_sink=FileCodeSink(pygen_file))
    pygen_file.close()

    foomodulegen_common.customize_module(module)

    module.generate(out)


def main():
    if sys.argv[1] == '-d':
        del sys.argv[1]
        import pdb
        pdb.set_trace()
        my_module_gen()
    else:
        try:
            import cProfile as profile
        except ImportError:
            my_module_gen()
        else:
            print >> sys.stderr, "** running under profiler"
            profile.run('my_module_gen()', 'foomodulegen-auto.pstat')


if __name__ == '__main__':
    main()
```

subject: Add a debug switch (-d) to enable debugger
message: Add a debug switch (-d) to enable debugger
lang: Python
license: lgpl-2.1
repos: cawka/pybindgen,caramucho/pybindgen,caramucho/pybindgen,cawka/pybindgen,cawka/pybindgen,caramucho/pybindgen,cawka/pybindgen,caramucho/pybindgen
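A hedged sketch of how the changed entry point is invoked; the header and output file names below are illustrative placeholders, not taken from the repository.

```python
# Typical invocations of the generator after this change (illustrative):
#
#   ./foomodulegen-auto.py decls.h pygen-out.py       # normal / profiled run
#   ./foomodulegen-auto.py -d decls.h pygen-out.py    # break into pdb first
#
# Note that "-d" must be the very first argument: main() checks sys.argv[1]
# before anything else and deletes it, so the remaining argv matches what
# my_module_gen() expects.
```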
---

commit: cec400d28be8a3868f3a52fb5cbd27d63b31bbea
old_file: tests/test_nowallet.py
new_file: tests/test_nowallet.py
old_contents: (empty)

new_contents:

```python
import pytest
import nowallet.nowallet as nowallet


@pytest.fixture
def dummy_connection(event_loop):
    server, port = 'hsmithsxurybd7uh.onion', 53011
    return nowallet.Connection(event_loop, server, port)


@pytest.fixture
def dummy_wallet(event_loop, dummy_connection):
    salt = "CORRECT HORSE"
    password = "BATTERY STAPLE"
    return nowallet.Wallet(salt, password, dummy_connection,
                           event_loop, nowallet.TBTC)


def test_wallet_attributes(dummy_wallet):
    assert not dummy_wallet.history
```

subject: Add initial fixtures for testing nowallet.py.
message: Add initial fixtures for testing nowallet.py.
lang: Python
license: mit
repos: metamarcdw/nowallet
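Note that the `event_loop` fixture consumed by `dummy_connection` and `dummy_wallet` is not defined in this file; it is presumably supplied by a plugin such as pytest-asyncio. A simplified sketch of such a fixture, for orientation only:

```python
import asyncio

import pytest


@pytest.fixture
def event_loop():
    # Simplified stand-in for the fixture pytest-asyncio provides; the real
    # one also handles event-loop policies and fixture scoping.
    loop = asyncio.new_event_loop()
    yield loop
    loop.close()
```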
---

commit: 8008a05fe59503d99a2141f14fcbb89f0d80322c
old_file: tests/test_selected.py
new_file: tests/test_selected.py
old_contents: (empty)

new_contents:

```python
from tests.base import IntegrationTest
from time import sleep


class TestAnnotateAction(IntegrationTest):

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.command(
            "TaskWikiAnnotate This is annotation.",
            regex="Task \"test task 1\" annotated.$",
            lines=1)

        self.tasks[0].refresh()
        annotation = self.tasks[0]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."


class TestAnnotateActionMoved(IntegrationTest):

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.client.type('2gg')  # Go to the second line
        self.command(
            "TaskWikiAnnotate This is annotation.",
            regex="Task \"test task 2\" annotated.$",
            lines=1)

        self.tasks[1].refresh()
        annotation = self.tasks[1]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."


class TestAnnotateActionRange(IntegrationTest):

    viminput = """
    * [ ] test task 1 #{uuid}
    * [ ] test task 2 #{uuid}
    """

    tasks = [
        dict(description="test task 1"),
        dict(description="test task 2"),
    ]

    def execute(self):
        self.client.type('V2gg')  # Go to the second line
        self.client.feedkeys(":TaskWikiAnnotate This is annotation.")
        self.client.type('<Enter>')
        sleep(2)

        for task in self.tasks:
            task.refresh()

        annotation = self.tasks[0]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."

        annotation = self.tasks[1]['annotations']
        assert annotation != []
        assert annotation[0]['description'] == "This is annotation."
```

subject: Add tests for TaskWikiAnnotation command
message: test: Add tests for TaskWikiAnnotation command
lang: Python
license: mit
repos: phha/taskwiki,Spirotot/taskwiki
---

commit: 5b18671bfe64ab6a6f29465a40ffcdb372633ea6
old_file: data/migrations/update-wof-l10n.py
new_file: data/migrations/update-wof-l10n.py
old_contents: (empty)

new_contents:

```python
# This expects a local checkout of whos on first data. It will fetch
# the data from the on disk location for all existing neighbourhoods
# in the database, and generate updates for all neighbourhoods that
# have other local names.

from tilequeue.wof import make_wof_filesystem_neighbourhood_fetcher
from tilequeue.wof import make_wof_model
import os
import sys
import yaml

cfg_path = '/etc/tilequeue/config.yaml'
wof_path = '/var/whosonfirst-data'

if not os.path.exists(cfg_path):
    print 'No tilequeue config found. Not updating wof l10n.'
    sys.exit(0)

if not os.path.exists(wof_path):
    print 'No woftilequeue data found. Not updating wof l10n.'
    sys.exit(1)

with open(cfg_path) as fh:
    yaml_data = yaml.load(fh)

wof_cfg = yaml_data['wof']
psql_cfg = wof_cfg['postgresql']

wof_model = make_wof_model(psql_cfg)

metas = wof_model.find_previous_neighbourhood_meta()

n_threads = 50
fs_neighbourhood_fetcher = make_wof_filesystem_neighbourhood_fetcher(
    wof_path, n_threads)

ns, failures = fs_neighbourhood_fetcher.fetch_raw_neighbourhoods(metas)

if failures:
    print 'Errors fetching neighbourhoods'
    # if there are more than 10, the rest will probably have failed
    # for the same reason
    for failure in failures[:10]:
        print 'Failed fetching %d: %r - %r' % (
            failure.wof_id, failure.reason, failure.message_one_line)
    sys.exit(1)

ns_to_update = []
for n in ns:
    if n.l10n_names:
        ns_to_update.append(n)

ns_to_add = []
ids_to_remove = []
wof_model.sync_neighbourhoods(ns_to_add, ns_to_update, ids_to_remove)
```

subject: Add migration for wof l10n names
message: Add migration for wof l10n names
lang: Python
license: mit
repos: mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource
---

commit: f2afa4c6666b1dc5105bd575d2e93ff691fe1f4c
old_file: compare_patches.py
new_file: compare_patches.py
old_contents: (empty)

new_contents:

```python
import re
import sys


def ParsePatch(filename):
    lines = [line.rstrip('\n') for line in open(filename, 'r')]

    def MatchChar(char):
        def M(line):
            if line.startswith(char):
                return line[1:]
            return None
        return M

    def Matches(i, func):
        got = []
        while i < len(lines):
            m = func(lines[i])
            if m is None:
                break
            got.append(m)
            i += 1
        return tuple(got), i

    hunks = []
    i = 0
    while i < len(lines):
        if not lines[i].startswith('@@'):
            i += 1
            continue
        i += 1
        while True:
            ctx, i = Matches(i, MatchChar(' '))
            rem, i = Matches(i, MatchChar('-'))
            add, i = Matches(i, MatchChar('+'))
            if len(rem) == 0 and len(add) == 0:
                break
            hunks.append((rem, add))
    return hunks


def WriteHunk(fh, hunk):
    rem, add = hunk
    for line in rem:
        fh.write('-%s\n' % line)
    for line in add:
        fh.write('+%s\n' % line)


def Diff(list1, list2):
    set2 = set(list2)
    for item in list1:
        if item not in set2:
            yield item


def Main(args):
    patch_file1 = args[0]
    patch_file2 = args[1]
    patch1 = ParsePatch(patch_file1)
    patch2 = ParsePatch(patch_file2)

    def Put(patch, dest_file):
        fh = open(dest_file, 'w')
        fh.write('%i patches\n' % len(patch))
        for hunk in patch:
            fh.write('\nPatch:\n')
            WriteHunk(fh, hunk)
        fh.close()

    Put(list(Diff(patch1, patch2)), 'out-before')
    Put(list(Diff(patch2, patch1)), 'out-after')


if __name__ == '__main__':
    Main(sys.argv[1:])
```

subject: Add initial version of tool to compare two patches

message:
Add initial version of tool to compare two patches

This doesn't track the filenames that patch hunks apply to yet. It also ignores patch hunks' contexts.

lang: Python
license: bsd-3-clause
repos: mseaborn/compare-patches
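The `Diff` helper above is the heart of the comparison: an order-preserving difference of hunk lists. A self-contained illustration of the same logic, using hypothetical hunk values:

```python
def diff(list1, list2):
    # Same logic as the script's Diff(): keep the items of list1 that are
    # absent from list2, preserving their original order.
    set2 = set(list2)
    for item in list1:
        if item not in set2:
            yield item


# Hunks are (removed_lines, added_lines) tuples, as produced by ParsePatch.
hunks_a = [(('old x',), ('new x',)), (('old y',), ('new y',))]
hunks_b = [(('old y',), ('new y',))]
assert list(diff(hunks_a, hunks_b)) == [(('old x',), ('new x',))]
```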
---

commit: 353a4c4758339b559fcfe70c5fdc7659f9f13560
old_file: examples/find-duplicated-contacts.py
new_file: examples/find-duplicated-contacts.py
old_contents: (empty)

new_contents:

```python
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Find components defining more than one contact with the same role.

In older versions of PDC, it was possible to have multiple contacts with the
same role. When limits for cardinality of this relationship were introduced,
we need to find all components that would not pass the new rules.

This script does exactly that. It iterates over all global and release
components and prints details about any component with duplicate contacts.
"""

import pdc_client

client = pdc_client.PDCClient('prod')


def run(resource):
    print 'Running tests for {}'.format(resource)
    for component in pdc_client.get_paged(client[resource]._):
        pk = component['id']
        name = component['name']
        release = component.get('release', {}).get('release_id', '[global]')
        seen_roles = set()
        for contact in component['contacts']:
            if contact['contact_role'] in seen_roles:
                print 'Duplicated roles for {}:{}/{}'.format(pk, release, name)
            seen_roles.add(contact['contact_role'])
    print ''


run('global-components')
run('release-components')
```

subject: Add example script to find components with duplicate contacts

message:
Add example script to find components with duplicate contacts

While this script is not terribly useful for day-to-day life, it can serve as an example on how to use the client.

JIRA: PDC-1105

lang: Python
license: mit
repos: xychu/product-definition-center,product-definition-center/product-definition-center,lao605/product-definition-center,lao605/product-definition-center,lao605/product-definition-center,xychu/product-definition-center,xychu/product-definition-center,tzhaoredhat/automation,product-definition-center/product-definition-center,release-engineering/product-definition-center,tzhaoredhat/automation,pombredanne/product-definition-center,pombredanne/product-definition-center,tzhaoredhat/automation,tzhaoredhat/automation,release-engineering/product-definition-center,pombredanne/product-definition-center,lao605/product-definition-center,release-engineering/product-definition-center,xychu/product-definition-center,release-engineering/product-definition-center,product-definition-center/product-definition-center,pombredanne/product-definition-center
---

commit: 60e879d37f691132c23efb12f9709223f69354f5
old_file: examples/ideal-helices/test_cases.py
new_file: examples/ideal-helices/test_cases.py
old_contents: (empty)

new_contents:

```python
#!/usr/bin/env ccp4-python
'''
Created on 29 Dec 2015

@author: jmht
'''
import cPickle
import os
import sys

AMPLE_DIR = os.sep.join(os.path.abspath(os.path.dirname(__file__)).split(os.sep)[ :-2 ])
sys.path.append(os.path.join(AMPLE_DIR, 'python'))
import test_funcs

test_dict = {}

###############################################################################
#
# Ideal Helices
#
###############################################################################

args_ideal_helices = [
    '-fasta', '2OVC.fasta',
    '-mtz', '2OVC-cad.mtz',
    '-ideal_helices', 'True',
    '-no_gui', 'True',
]


def test_ideal_helices(resultsd_pkl):
    with open(resultsd_pkl) as f:
        ad = cPickle.load(f)
    if not ('mrbump_results' in ad or len(ad['mrbump_results'])):
        raise AmpleException("No MRBUMP results")
    if not ad['success']:
        raise AmpleException("Job did no succeed")
    if not ad['mrbump_results'][0]['SHELXE_CC'] > 25:
        raise AmpleException("SHELXE_CC criteria not met")
    return

test_dict['ideal_helices'] = { 'args' : args_ideal_helices,
                               'test' : test_ideal_helices }

###############################################################################
#
# End Test Setup
#
###############################################################################

if __name__ == '__main__':
    test_funcs.parse_args(test_dict)
```

subject: Test case for ideal helices
message: Test case for ideal helices
lang: Python
license: bsd-3-clause
repos: linucks/ample,rigdenlab/ample,rigdenlab/ample,linucks/ample
Test case for ideal helices
#!/usr/bin/env ccp4-python ''' Created on 29 Dec 2015 @author: jmht ''' import cPickle import os import sys AMPLE_DIR = os.sep.join(os.path.abspath(os.path.dirname(__file__)).split(os.sep)[ :-2 ]) sys.path.append(os.path.join(AMPLE_DIR,'python')) import test_funcs test_dict = {} ############################################################################### # # Ideal Helices # ############################################################################### args_ideal_helices = [ '-fasta', '2OVC.fasta', '-mtz', '2OVC-cad.mtz', '-ideal_helices', 'True', '-no_gui', 'True', ] def test_ideal_helices(resultsd_pkl): with open(resultsd_pkl) as f: ad = cPickle.load(f) if 'mrbump_results' not in ad or not len(ad['mrbump_results']): raise AmpleException("No MRBUMP results") if not ad['success']: raise AmpleException("Job did not succeed") if not ad['mrbump_results'][0]['SHELXE_CC'] > 25: raise AmpleException("SHELXE_CC criteria not met") return test_dict['ideal_helices'] = { 'args' : args_ideal_helices, 'test' : test_ideal_helices } ############################################################################### # # End Test Setup # ############################################################################### if __name__ == '__main__': test_funcs.parse_args(test_dict)
<commit_before><commit_msg>Test case for ideal helices<commit_after>
#!/usr/bin/env ccp4-python ''' Created on 29 Dec 2015 @author: jmht ''' import cPickle import os import sys AMPLE_DIR = os.sep.join(os.path.abspath(os.path.dirname(__file__)).split(os.sep)[ :-2 ]) sys.path.append(os.path.join(AMPLE_DIR,'python')) import test_funcs test_dict = {} ############################################################################### # # Ideal Helices # ############################################################################### args_ideal_helices = [ '-fasta', '2OVC.fasta', '-mtz', '2OVC-cad.mtz', '-ideal_helices', 'True', '-no_gui', 'True', ] def test_ideal_helices(resultsd_pkl): with open(resultsd_pkl) as f: ad = cPickle.load(f) if 'mrbump_results' not in ad or not len(ad['mrbump_results']): raise AmpleException("No MRBUMP results") if not ad['success']: raise AmpleException("Job did not succeed") if not ad['mrbump_results'][0]['SHELXE_CC'] > 25: raise AmpleException("SHELXE_CC criteria not met") return test_dict['ideal_helices'] = { 'args' : args_ideal_helices, 'test' : test_ideal_helices } ############################################################################### # # End Test Setup # ############################################################################### if __name__ == '__main__': test_funcs.parse_args(test_dict)
Test case for ideal helices#!/usr/bin/env ccp4-python ''' Created on 29 Dec 2015 @author: jmht ''' import cPickle import os import sys AMPLE_DIR = os.sep.join(os.path.abspath(os.path.dirname(__file__)).split(os.sep)[ :-2 ]) sys.path.append(os.path.join(AMPLE_DIR,'python')) import test_funcs test_dict = {} ############################################################################### # # Ideal Helices # ############################################################################### args_ideal_helices = [ '-fasta', '2OVC.fasta', '-mtz', '2OVC-cad.mtz', '-ideal_helices', 'True', '-no_gui', 'True', ] def test_ideal_helices(resultsd_pkl): with open(resultsd_pkl) as f: ad = cPickle.load(f) if 'mrbump_results' not in ad or not len(ad['mrbump_results']): raise AmpleException("No MRBUMP results") if not ad['success']: raise AmpleException("Job did not succeed") if not ad['mrbump_results'][0]['SHELXE_CC'] > 25: raise AmpleException("SHELXE_CC criteria not met") return test_dict['ideal_helices'] = { 'args' : args_ideal_helices, 'test' : test_ideal_helices } ############################################################################### # # End Test Setup # ############################################################################### if __name__ == '__main__': test_funcs.parse_args(test_dict)
<commit_before><commit_msg>Test case for ideal helices<commit_after>#!/usr/bin/env ccp4-python ''' Created on 29 Dec 2015 @author: jmht ''' import cPickle import os import sys AMPLE_DIR = os.sep.join(os.path.abspath(os.path.dirname(__file__)).split(os.sep)[ :-2 ]) sys.path.append(os.path.join(AMPLE_DIR,'python')) import test_funcs test_dict = {} ############################################################################### # # Ideal Helices # ############################################################################### args_ideal_helices = [ '-fasta', '2OVC.fasta', '-mtz', '2OVC-cad.mtz', '-ideal_helices', 'True', '-no_gui', 'True', ] def test_ideal_helices(resultsd_pkl): with open(resultsd_pkl) as f: ad = cPickle.load(f) if 'mrbump_results' not in ad or not len(ad['mrbump_results']): raise AmpleException("No MRBUMP results") if not ad['success']: raise AmpleException("Job did not succeed") if not ad['mrbump_results'][0]['SHELXE_CC'] > 25: raise AmpleException("SHELXE_CC criteria not met") return test_dict['ideal_helices'] = { 'args' : args_ideal_helices, 'test' : test_ideal_helices } ############################################################################### # # End Test Setup # ############################################################################### if __name__ == '__main__': test_funcs.parse_args(test_dict)
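For readers who want to exercise the pass/fail logic without an AMPLE run, here is a self-contained sketch of the same criteria. AmpleException is defined locally because the real one is assumed to come from the surrounding test harness, which this snippet does not import:

```python
# Standalone sketch of the success criteria applied to the results pickle.

class AmpleException(Exception):
    """Local stand-in for the exception raised by the AMPLE test harness."""

def check_results(ad, min_shelxe_cc=25):
    """Validate an AMPLE results dictionary against the test criteria."""
    if not ad.get('mrbump_results'):
        raise AmpleException("No MRBUMP results")
    if not ad.get('success'):
        raise AmpleException("Job did not succeed")
    if not ad['mrbump_results'][0]['SHELXE_CC'] > min_shelxe_cc:
        raise AmpleException("SHELXE_CC criteria not met")

# Passes silently; drop SHELXE_CC below 25 to see the exception.
check_results({'success': True, 'mrbump_results': [{'SHELXE_CC': 30.2}]})
```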
9342e0f699c97f94274d30040b178484bbafff94
mpld3/test_plots/test_date_ticks.py
mpld3/test_plots/test_date_ticks.py
"""Plot to test custom date axis tick locations and labels""" from datetime import datetime import matplotlib.pyplot as plt import mpld3 def create_plot(): times = [datetime(2013, 12, i) for i in range(1,9)] ticks = [times[2],times[3],times[-2]] labels = [t.strftime("%Y-%m-%d") for t in ticks] plt.plot_date(times, times, xdate=True, ydate=True) plt.xticks(ticks,labels) plt.yticks(ticks) return plt.gcf() def test_date(): fig = create_plot() html = mpld3.fig_to_html(fig) plt.close(fig) if __name__ == "__main__": mpld3.show(create_plot(),template_type='simple')
Add test plot for custom date ticks
Add test plot for custom date ticks
Python
bsd-3-clause
jakevdp/mpld3,aflaxman/mpld3,jakevdp/mpld3,etgalloway/mpld3,e-koch/mpld3,kdheepak89/mpld3,etgalloway/mpld3,aflaxman/mpld3,e-koch/mpld3,kdheepak89/mpld3,mpld3/mpld3,mpld3/mpld3
Add test plot for custom date ticks
"""Plot to test custom date axis tick locations and labels""" from datetime import datetime import matplotlib.pyplot as plt import mpld3 def create_plot(): times = [datetime(2013, 12, i) for i in range(1,9)] ticks = [times[2],times[3],times[-2]] labels = [t.strftime("%Y-%m-%d") for t in ticks] plt.plot_date(times, times, xdate=True, ydate=True) plt.xticks(ticks,labels) plt.yticks(ticks) return plt.gcf() def test_date(): fig = create_plot() html = mpld3.fig_to_html(fig) plt.close(fig) if __name__ == "__main__": mpld3.show(create_plot(),template_type='simple')
<commit_before><commit_msg>Add test plot for custom date ticks<commit_after>
"""Plot to test custom date axis tick locations and labels""" from datetime import datetime import matplotlib.pyplot as plt import mpld3 def create_plot(): times = [datetime(2013, 12, i) for i in range(1,9)] ticks = [times[2],times[3],times[-2]] labels = [t.strftime("%Y-%m-%d") for t in ticks] plt.plot_date(times, times, xdate=True, ydate=True) plt.xticks(ticks,labels) plt.yticks(ticks) return plt.gcf() def test_date(): fig = create_plot() html = mpld3.fig_to_html(fig) plt.close(fig) if __name__ == "__main__": mpld3.show(create_plot(),template_type='simple')
Add test plot for custom date ticks"""Plot to test custom date axis tick locations and labels""" from datetime import datetime import matplotlib.pyplot as plt import mpld3 def create_plot(): times = [datetime(2013, 12, i) for i in range(1,9)] ticks = [times[2],times[3],times[-2]] labels = [t.strftime("%Y-%m-%d") for t in ticks] plt.plot_date(times, times, xdate=True, ydate=True) plt.xticks(ticks,labels) plt.yticks(ticks) return plt.gcf() def test_date(): fig = create_plot() html = mpld3.fig_to_html(fig) plt.close(fig) if __name__ == "__main__": mpld3.show(create_plot(),template_type='simple')
<commit_before><commit_msg>Add test plot for custom date ticks<commit_after>"""Plot to test custom date axis tick locations and labels""" from datetime import datetime import matplotlib.pyplot as plt import mpld3 def create_plot(): times = [datetime(2013, 12, i) for i in range(1,9)] ticks = [times[2],times[3],times[-2]] labels = [t.strftime("%Y-%m-%d") for t in ticks] plt.plot_date(times, times, xdate=True, ydate=True) plt.xticks(ticks,labels) plt.yticks(ticks) return plt.gcf() def test_date(): fig = create_plot() html = mpld3.fig_to_html(fig) plt.close(fig) if __name__ == "__main__": mpld3.show(create_plot(),template_type='simple')
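For comparison, the same custom date ticks written against Matplotlib's object-oriented API. This is a sketch, not part of the mpld3 test suite, and it uses plain plot() with datetime values because plot_date has been deprecated in newer Matplotlib releases:

```python
from datetime import datetime
import matplotlib.pyplot as plt

times = [datetime(2013, 12, i) for i in range(1, 9)]
ticks = [times[2], times[3], times[-2]]

fig, ax = plt.subplots()
ax.plot(times, times, 'o')
ax.set_xticks(ticks)
ax.set_xticklabels([t.strftime("%Y-%m-%d") for t in ticks])
ax.set_yticks(ticks)
fig.autofmt_xdate()  # rotate the x labels so the dates do not overlap
plt.show()
```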
845cc0cb8acc28d328a89af77cddb59af666f2d0
examples/plot_implied_timescales.py
examples/plot_implied_timescales.py
""" Implied Timescales Plot =============== """ from msmbuilder.example_datasets import FsPeptide from msmbuilder.featurizer import DihedralFeaturizer from msmbuilder.decomposition import tICA from msmbuilder.cluster import MiniBatchKMeans from msmbuilder.msm import MarkovStateModel import numpy as np import msmexplorer as msme rs = np.random.RandomState(42) # Load Fs Peptide Data trajs = FsPeptide().get().trajectories # Extract Backbone Dihedrals featurizer = DihedralFeaturizer(types=['phi', 'psi']) diheds = featurizer.fit_transform(trajs) # Perform Dimensionality Reduction tica_model = tICA(lag_time=2, n_components=2) tica_trajs = tica_model.fit_transform(diheds) # Perform Clustering clusterer = MiniBatchKMeans(n_clusters=100, random_state=rs) clustered_trajs = clusterer.fit_transform(tica_trajs) lag_times = [1, 10, 50, 100, 200, 250, 500] msm_objs = [] for lag in lag_times: # Construct MSM msm = MarkovStateModel(lag_time=lag, n_timescales=5) msm.fit(clustered_trajs) msm_objs.append(msm) # Plot Timescales colors = ['pomegranate', 'beryl', 'tarragon', 'rawdenim', 'carbon'] msme.plot_implied_timescales(msm_objs, color_palette=colors, xlabel='Lag time (frames)', ylabel='Implied Timescales ($ns$)')
Add example of Implied Timescales Plot
Add example of Implied Timescales Plot
Python
mit
msmexplorer/msmexplorer,msmexplorer/msmexplorer
Add example of Implied Timescales Plot
""" Implied Timescales Plot =============== """ from msmbuilder.example_datasets import FsPeptide from msmbuilder.featurizer import DihedralFeaturizer from msmbuilder.decomposition import tICA from msmbuilder.cluster import MiniBatchKMeans from msmbuilder.msm import MarkovStateModel import numpy as np import msmexplorer as msme rs = np.random.RandomState(42) # Load Fs Peptide Data trajs = FsPeptide().get().trajectories # Extract Backbone Dihedrals featurizer = DihedralFeaturizer(types=['phi', 'psi']) diheds = featurizer.fit_transform(trajs) # Perform Dimensionality Reduction tica_model = tICA(lag_time=2, n_components=2) tica_trajs = tica_model.fit_transform(diheds) # Perform Clustering clusterer = MiniBatchKMeans(n_clusters=100, random_state=rs) clustered_trajs = clusterer.fit_transform(tica_trajs) lag_times = [1, 10, 50, 100, 200, 250, 500] msm_objs = [] for lag in lag_times: # Construct MSM msm = MarkovStateModel(lag_time=lag, n_timescales=5) msm.fit(clustered_trajs) msm_objs.append(msm) # Plot Timescales colors = ['pomegranate', 'beryl', 'tarragon', 'rawdenim', 'carbon'] msme.plot_implied_timescales(msm_objs, color_palette=colors, xlabel='Lag time (frames)', ylabel='Implied Timescales ($ns$)')
<commit_before><commit_msg>Add example of Implied Timescales Plot<commit_after>
""" Implied Timescales Plot =============== """ from msmbuilder.example_datasets import FsPeptide from msmbuilder.featurizer import DihedralFeaturizer from msmbuilder.decomposition import tICA from msmbuilder.cluster import MiniBatchKMeans from msmbuilder.msm import MarkovStateModel import numpy as np import msmexplorer as msme rs = np.random.RandomState(42) # Load Fs Peptide Data trajs = FsPeptide().get().trajectories # Extract Backbone Dihedrals featurizer = DihedralFeaturizer(types=['phi', 'psi']) diheds = featurizer.fit_transform(trajs) # Perform Dimensionality Reduction tica_model = tICA(lag_time=2, n_components=2) tica_trajs = tica_model.fit_transform(diheds) # Perform Clustering clusterer = MiniBatchKMeans(n_clusters=100, random_state=rs) clustered_trajs = clusterer.fit_transform(tica_trajs) lag_times = [1, 10, 50, 100, 200, 250, 500] msm_objs = [] for lag in lag_times: # Construct MSM msm = MarkovStateModel(lag_time=lag, n_timescales=5) msm.fit(clustered_trajs) msm_objs.append(msm) # Plot Timescales colors = ['pomegranate', 'beryl', 'tarragon', 'rawdenim', 'carbon'] msme.plot_implied_timescales(msm_objs, color_palette=colors, xlabel='Lag time (frames)', ylabel='Implied Timescales ($ns$)')
Add example of Implied Timescales Plot""" Implied Timescales Plot ======================= """ from msmbuilder.example_datasets import FsPeptide from msmbuilder.featurizer import DihedralFeaturizer from msmbuilder.decomposition import tICA from msmbuilder.cluster import MiniBatchKMeans from msmbuilder.msm import MarkovStateModel import numpy as np import msmexplorer as msme rs = np.random.RandomState(42) # Load Fs Peptide Data trajs = FsPeptide().get().trajectories # Extract Backbone Dihedrals featurizer = DihedralFeaturizer(types=['phi', 'psi']) diheds = featurizer.fit_transform(trajs) # Perform Dimensionality Reduction tica_model = tICA(lag_time=2, n_components=2) tica_trajs = tica_model.fit_transform(diheds) # Perform Clustering clusterer = MiniBatchKMeans(n_clusters=100, random_state=rs) clustered_trajs = clusterer.fit_transform(tica_trajs) lag_times = [1, 10, 50, 100, 200, 250, 500] msm_objs = [] for lag in lag_times: # Construct MSM msm = MarkovStateModel(lag_time=lag, n_timescales=5) msm.fit(clustered_trajs) msm_objs.append(msm) # Plot Timescales colors = ['pomegranate', 'beryl', 'tarragon', 'rawdenim', 'carbon'] msme.plot_implied_timescales(msm_objs, color_palette=colors, xlabel='Lag time (frames)', ylabel='Implied Timescales ($ns$)')
<commit_before><commit_msg>Add example of Implied Timescales Plot<commit_after>""" Implied Timescales Plot ======================= """ from msmbuilder.example_datasets import FsPeptide from msmbuilder.featurizer import DihedralFeaturizer from msmbuilder.decomposition import tICA from msmbuilder.cluster import MiniBatchKMeans from msmbuilder.msm import MarkovStateModel import numpy as np import msmexplorer as msme rs = np.random.RandomState(42) # Load Fs Peptide Data trajs = FsPeptide().get().trajectories # Extract Backbone Dihedrals featurizer = DihedralFeaturizer(types=['phi', 'psi']) diheds = featurizer.fit_transform(trajs) # Perform Dimensionality Reduction tica_model = tICA(lag_time=2, n_components=2) tica_trajs = tica_model.fit_transform(diheds) # Perform Clustering clusterer = MiniBatchKMeans(n_clusters=100, random_state=rs) clustered_trajs = clusterer.fit_transform(tica_trajs) lag_times = [1, 10, 50, 100, 200, 250, 500] msm_objs = [] for lag in lag_times: # Construct MSM msm = MarkovStateModel(lag_time=lag, n_timescales=5) msm.fit(clustered_trajs) msm_objs.append(msm) # Plot Timescales colors = ['pomegranate', 'beryl', 'tarragon', 'rawdenim', 'carbon'] msme.plot_implied_timescales(msm_objs, color_palette=colors, xlabel='Lag time (frames)', ylabel='Implied Timescales ($ns$)')
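The quantity plot_implied_timescales visualizes reduces to a textbook formula: for a transition matrix estimated at lag time tau, each eigenvalue lambda_i with 0 < lambda_i < 1 contributes an implied timescale t_i = -tau / ln(lambda_i). A minimal NumPy sketch with an invented two-state matrix:

```python
import numpy as np

def implied_timescales(transition_matrix, lag_time):
    """Implied timescales t_i = -lag_time / ln(lambda_i) of a Markov model."""
    eigvals = np.sort(np.real(np.linalg.eigvals(transition_matrix)))[::-1]
    lam = eigvals[(eigvals > 0) & (eigvals < 1)]  # drop the stationary lambda == 1
    return -lag_time / np.log(lam)

T = np.array([[0.9, 0.1],
              [0.2, 0.8]])
print(implied_timescales(T, lag_time=10))  # one slow process, ~28 frames
```

Plotting this value against the lag time, as the script does, shows where the timescales level off and the Markov assumption becomes reasonable.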
79a1f426e22f3c213bbb081f4ca23ccf1a6f61d7
openedx/core/djangoapps/content/block_structure/migrations/0005_trim_leading_slashes_in_data_path.py
openedx/core/djangoapps/content/block_structure/migrations/0005_trim_leading_slashes_in_data_path.py
""" Data migration to convert absolute paths in block_structure.data to be relative. This has only been tested with MySQL, though it should also work for Postgres as well. This is necessary to manually correct absolute paths in the "data" field of the block_structure table. For S3 storage, having a path that starts with "/courses/" puts things in the same place as a path starting with "courses/", but absolute paths are not permitted for FileFields. These values would have always been broken in devstack (because it's not in MEDIA_ROOT), but it used to work for the S3 storages option because the security checking happened at the storage layer, and the path is equivalent in S3 because we just append either value to the bucket's root. However, in Django > 2.2.20, this checking against absolute paths has been added to the FileField itself, and an upgrade attempt started causing write failures to Block Structures. There are separate PRs to fix the config values so that new writes start with a "courses/" prefix. This migration to is fix old entries by removing any leading "/" characters. THIS MIGRATION MUST BE RUN BEFORE UPGRADING TO DJANGO > 2.2.20 IF YOU ARE USING A STORAGE_CLASS IN BLOCK_STRUCTURES_SETTINGS. If you do not specify this setting and only run Block Structures out of memcached, this should not affect you. """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('block_structure', '0004_blockstructuremodel_usagekeywithrun'), ] operations = [ migrations.RunSQL( """ UPDATE block_structure SET data = right(data, length(data) - 1) WHERE data like '/%'; """ ) ]
Convert block_structure.data to relative paths (TNL-8335)
fix: Convert block_structure.data to relative paths (TNL-8335) In order to upgrade to Django > 2.2.20, we can't continue to use absolute paths in the block_structure's data FileField. This used to work for S3, but it will not work going forward due to a security fix in Django 2.2.21. This data migration will remove the starting '/' from any paths in the block_structure table. The resulting locations in S3 should be unaffected.
Python
agpl-3.0
eduNEXT/edunext-platform,eduNEXT/edunext-platform,eduNEXT/edunext-platform,eduNEXT/edunext-platform
fix: Convert block_structure.data to relative paths (TNL-8335) In order to upgrade to Django > 2.2.20, we can't continue to use absolute paths in the block_structure's data FileField. This used to work for S3, but it will not work going forward due to a security fix in Django 2.2.21. This data migration will remove the starting '/' from any paths in the block_structure table. The resulting locations in S3 should be unaffected.
""" Data migration to convert absolute paths in block_structure.data to be relative. This has only been tested with MySQL, though it should also work for Postgres as well. This is necessary to manually correct absolute paths in the "data" field of the block_structure table. For S3 storage, having a path that starts with "/courses/" puts things in the same place as a path starting with "courses/", but absolute paths are not permitted for FileFields. These values would have always been broken in devstack (because it's not in MEDIA_ROOT), but it used to work for the S3 storages option because the security checking happened at the storage layer, and the path is equivalent in S3 because we just append either value to the bucket's root. However, in Django > 2.2.20, this checking against absolute paths has been added to the FileField itself, and an upgrade attempt started causing write failures to Block Structures. There are separate PRs to fix the config values so that new writes start with a "courses/" prefix. This migration to is fix old entries by removing any leading "/" characters. THIS MIGRATION MUST BE RUN BEFORE UPGRADING TO DJANGO > 2.2.20 IF YOU ARE USING A STORAGE_CLASS IN BLOCK_STRUCTURES_SETTINGS. If you do not specify this setting and only run Block Structures out of memcached, this should not affect you. """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('block_structure', '0004_blockstructuremodel_usagekeywithrun'), ] operations = [ migrations.RunSQL( """ UPDATE block_structure SET data = right(data, length(data) - 1) WHERE data like '/%'; """ ) ]
<commit_before><commit_msg>fix: Convert block_structure.data to relative paths (TNL-8335) In order to upgrade to Django > 2.2.20, we can't continue to use absolute paths in the block_structure's data FileField. This used to work for S3, but it will not work going forward due to a security fix in Django 2.2.21. This data migration will remove the starting '/' from any paths in the block_structure table. The resulting locations in S3 should be unaffected.<commit_after>
""" Data migration to convert absolute paths in block_structure.data to be relative. This has only been tested with MySQL, though it should also work for Postgres as well. This is necessary to manually correct absolute paths in the "data" field of the block_structure table. For S3 storage, having a path that starts with "/courses/" puts things in the same place as a path starting with "courses/", but absolute paths are not permitted for FileFields. These values would have always been broken in devstack (because it's not in MEDIA_ROOT), but it used to work for the S3 storages option because the security checking happened at the storage layer, and the path is equivalent in S3 because we just append either value to the bucket's root. However, in Django > 2.2.20, this checking against absolute paths has been added to the FileField itself, and an upgrade attempt started causing write failures to Block Structures. There are separate PRs to fix the config values so that new writes start with a "courses/" prefix. This migration to is fix old entries by removing any leading "/" characters. THIS MIGRATION MUST BE RUN BEFORE UPGRADING TO DJANGO > 2.2.20 IF YOU ARE USING A STORAGE_CLASS IN BLOCK_STRUCTURES_SETTINGS. If you do not specify this setting and only run Block Structures out of memcached, this should not affect you. """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('block_structure', '0004_blockstructuremodel_usagekeywithrun'), ] operations = [ migrations.RunSQL( """ UPDATE block_structure SET data = right(data, length(data) - 1) WHERE data like '/%'; """ ) ]
fix: Convert block_structure.data to relative paths (TNL-8335) In order to upgrade to Django > 2.2.20, we can't continue to use absolute paths in the block_structure's data FileField. This used to work for S3, but it will not work going forward due to a security fix in Django 2.2.21. This data migration will remove the starting '/' from any paths in the block_structure table. The resulting locations in S3 should be unaffected.""" Data migration to convert absolute paths in block_structure.data to be relative. This has only been tested with MySQL, though it should work for Postgres as well. This is necessary to manually correct absolute paths in the "data" field of the block_structure table. For S3 storage, having a path that starts with "/courses/" puts things in the same place as a path starting with "courses/", but absolute paths are not permitted for FileFields. These values would have always been broken in devstack (because it's not in MEDIA_ROOT), but it used to work for the S3 storages option because the security checking happened at the storage layer, and the path is equivalent in S3 because we just append either value to the bucket's root. However, in Django > 2.2.20, this checking against absolute paths has been added to the FileField itself, and an upgrade attempt started causing write failures to Block Structures. There are separate PRs to fix the config values so that new writes start with a "courses/" prefix. This migration is to fix old entries by removing any leading "/" characters. THIS MIGRATION MUST BE RUN BEFORE UPGRADING TO DJANGO > 2.2.20 IF YOU ARE USING A STORAGE_CLASS IN BLOCK_STRUCTURES_SETTINGS. If you do not specify this setting and only run Block Structures out of memcached, this should not affect you. """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('block_structure', '0004_blockstructuremodel_usagekeywithrun'), ] operations = [ migrations.RunSQL( """ UPDATE block_structure SET data = right(data, length(data) - 1) WHERE data like '/%'; """ ) ]
<commit_before><commit_msg>fix: Convert block_structure.data to relative paths (TNL-8335) In order to upgrade to Django > 2.2.20, we can't continue to use absolute paths in the block_structure's data FileField. This used to work for S3, but it will not work going forward due to a security fix in Django 2.2.21. This data migration will remove the starting '/' from any paths in the block_structure table. The resulting locations in S3 should be unaffected.<commit_after>""" Data migration to convert absolute paths in block_structure.data to be relative. This has only been tested with MySQL, though it should work for Postgres as well. This is necessary to manually correct absolute paths in the "data" field of the block_structure table. For S3 storage, having a path that starts with "/courses/" puts things in the same place as a path starting with "courses/", but absolute paths are not permitted for FileFields. These values would have always been broken in devstack (because it's not in MEDIA_ROOT), but it used to work for the S3 storages option because the security checking happened at the storage layer, and the path is equivalent in S3 because we just append either value to the bucket's root. However, in Django > 2.2.20, this checking against absolute paths has been added to the FileField itself, and an upgrade attempt started causing write failures to Block Structures. There are separate PRs to fix the config values so that new writes start with a "courses/" prefix. This migration is to fix old entries by removing any leading "/" characters. THIS MIGRATION MUST BE RUN BEFORE UPGRADING TO DJANGO > 2.2.20 IF YOU ARE USING A STORAGE_CLASS IN BLOCK_STRUCTURES_SETTINGS. If you do not specify this setting and only run Block Structures out of memcached, this should not affect you. """ from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('block_structure', '0004_blockstructuremodel_usagekeywithrun'), ] operations = [ migrations.RunSQL( """ UPDATE block_structure SET data = right(data, length(data) - 1) WHERE data like '/%'; """ ) ]
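For reference only, the same cleanup can be sketched with RunPython and the ORM instead of raw SQL. The RunSQL form above is the shipped migration; this alternative assumes the model is named BlockStructureModel, as the migration dependency suggests, and differs slightly in that lstrip removes every leading slash rather than exactly one character:

```python
from django.db import migrations

def strip_leading_slashes(apps, schema_editor):
    BlockStructureModel = apps.get_model('block_structure', 'BlockStructureModel')
    for row in BlockStructureModel.objects.filter(data__startswith='/').iterator():
        # row.data is a FileField, so reassign through its stored name.
        row.data = row.data.name.lstrip('/')
        row.save(update_fields=['data'])

class Migration(migrations.Migration):
    dependencies = [
        ('block_structure', '0004_blockstructuremodel_usagekeywithrun'),
    ]
    operations = [
        migrations.RunPython(strip_leading_slashes, migrations.RunPython.noop),
    ]
```

The raw SQL runs as a single statement, which matters on a large block_structure table; the ORM loop trades that for portability and per-row auditability.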
079b9f0856e7574c3df10ad5e847b5812f6a2ca8
filter-dictionary/exclude-words.py
filter-dictionary/exclude-words.py
#!/usr/bin/env python3 # # Reads a dictionary and a file that contains a list of words that should be # removed. Then prints the dictionary with those words excluded. import argparse import sys import codecs from pronunciationdictionary import PronunciationDictionary from filetypes import TextFileType, BinaryFileType parser = argparse.ArgumentParser() parser.add_argument('dictionary', type=BinaryFileType('r'), help='the source dictionary') parser.add_argument('wordlist', type=TextFileType('r'), help='file containing a list of words to be excluded') parser.add_argument('-c', '--count', type=int, dest='exclude_count', default=None, help="number of dictionary entries to remove (the default is all the entries in the word list)") parser.add_argument('-k', '--keep-words', action='store_true', default=False, help="leave at least one pronunciation for every word") parser.add_argument('--count-kept', action='store_true', default=False, help="when using --keep-words, count is the number of entries considered, even if they were not removed") args = parser.parse_args() words = args.wordlist.readlines() args.wordlist.close() words = [x.rstrip() for x in words] dictionary = PronunciationDictionary() dictionary.read(args.dictionary) args.dictionary.close() num_deleted = 0 for word in words: if (args.exclude_count is not None) and (num_deleted >= args.exclude_count): break # A trailing colon and number marks a pronunciation variant. colon_pos = word.rfind(':') if colon_pos > 0: pronunciation_id = int(word[colon_pos+1:]) word = word[:colon_pos] if args.keep_words: if dictionary[word].num_pronunciations() <= 1: if args.count_kept: num_deleted += 1 continue dictionary.delete_pronunciation(word, pronunciation_id) else: if args.keep_words: if args.count_kept: num_deleted += 1 continue dictionary.delete_word(word) num_deleted += 1 dictionary.write()
Remove listed files from a pronunciation dictionary.
Remove listed files from a pronunciation dictionary.
Python
apache-2.0
senarvi/senarvi-speech
Remove listed files from a pronunciation dictionary.
#!/usr/bin/env python3 # # Reads a dictionary and a file that contains a list of words that should be # removed. Then prints the dictionary with those words excluded. import argparse import sys import codecs from pronunciationdictionary import PronunciationDictionary from filetypes import TextFileType, BinaryFileType parser = argparse.ArgumentParser() parser.add_argument('dictionary', type=BinaryFileType('r'), help='the source dictionary') parser.add_argument('wordlist', type=TextFileType('r'), help='file containing a list of words to be excluded') parser.add_argument('-c', '--count', type=int, dest='exclude_count', default=None, help="number of dictionary entries to remove (the default is all the entries in the word list)") parser.add_argument('-k', '--keep-words', action='store_true', default=False, help="leave at least one pronunciation for every word") parser.add_argument('--count-kept', action='store_true', default=False, help="when using --keep-words, count is the number of entries considered, even if they were not removed") args = parser.parse_args() words = args.wordlist.readlines() args.wordlist.close() words = [x.rstrip() for x in words] dictionary = PronunciationDictionary() dictionary.read(args.dictionary) args.dictionary.close() num_deleted = 0 for word in words: if (args.exclude_count is not None) and (num_deleted >= args.exclude_count): break # A trailing colon and number marks a pronunciation variant. colon_pos = word.rfind(':') if colon_pos > 0: pronunciation_id = int(word[colon_pos+1:]) word = word[:colon_pos] if args.keep_words: if dictionary[word].num_pronunciations() <= 1: if args.count_kept: num_deleted += 1 continue dictionary.delete_pronunciation(word, pronunciation_id) else: if args.keep_words: if args.count_kept: num_deleted += 1 continue dictionary.delete_word(word) num_deleted += 1 dictionary.write()
<commit_before><commit_msg>Remove listed files from a pronunciation dictionary.<commit_after>
#!/usr/bin/env python3 # # Reads a dictionary and a file that contains a list of words that should be # removed. Then prints the dictionary with those words excluded. import argparse import sys import codecs from pronunciationdictionary import PronunciationDictionary from filetypes import TextFileType, BinaryFileType parser = argparse.ArgumentParser() parser.add_argument('dictionary', type=BinaryFileType('r'), help='the source dictionary') parser.add_argument('wordlist', type=TextFileType('r'), help='file containing a list of words to be excluded') parser.add_argument('-c', '--count', type=int, dest='exclude_count', default=None, help="number of dictionary entries to remove (the default is all the entries in the word list)") parser.add_argument('-k', '--keep-words', action='store_true', default=False, help="leave at least one pronunciation for every word") parser.add_argument('--count-kept', action='store_true', default=False, help="when using --keep-words, count is the number of entries considered, even if they were not removed") args = parser.parse_args() words = args.wordlist.readlines() args.wordlist.close() words = [x.rstrip() for x in words] dictionary = PronunciationDictionary() dictionary.read(args.dictionary) args.dictionary.close() num_deleted = 0 for word in words: if (args.exclude_count is not None) and (num_deleted >= args.exclude_count): break # A trailing colon and number marks a pronunciation variant. colon_pos = word.rfind(':') if colon_pos > 0: pronunciation_id = int(word[colon_pos+1:]) word = word[:colon_pos] if args.keep_words: if dictionary[word].num_pronunciations() <= 1: if args.count_kept: num_deleted += 1 continue dictionary.delete_pronunciation(word, pronunciation_id) else: if args.keep_words: if args.count_kept: num_deleted += 1 continue dictionary.delete_word(word) num_deleted += 1 dictionary.write()
Remove listed files from a pronunciation dictionary.#!/usr/bin/env python3 # # Reads a dictionary and a file that contains a list of words that should be # removed. Then prints the dictionary with those words excluded. import argparse import sys import codecs from pronunciationdictionary import PronunciationDictionary from filetypes import TextFileType, BinaryFileType parser = argparse.ArgumentParser() parser.add_argument('dictionary', type=BinaryFileType('r'), help='the source dictionary') parser.add_argument('wordlist', type=TextFileType('r'), help='file containing a list of words to be excluded') parser.add_argument('-c', '--count', type=int, dest='exclude_count', default=None, help="number of dictionary entries to remove (the default is all the entries in the word list)") parser.add_argument('-k', '--keep-words', action='store_true', default=False, help="leave at least one pronunciation for every word") parser.add_argument('--count-kept', action='store_true', default=False, help="when using --keep-words, count is the number of entries considered, even if they were not removed") args = parser.parse_args() words = args.wordlist.readlines() args.wordlist.close() words = [x.rstrip() for x in words] dictionary = PronunciationDictionary() dictionary.read(args.dictionary) args.dictionary.close() num_deleted = 0 for word in words: if (args.exclude_count is not None) and (num_deleted >= args.exclude_count): break # A trailing colon and number marks a pronunciation variant. colon_pos = word.rfind(':') if colon_pos > 0: pronunciation_id = int(word[colon_pos+1:]) word = word[:colon_pos] if args.keep_words: if dictionary[word].num_pronunciations() <= 1: if args.count_kept: num_deleted += 1 continue dictionary.delete_pronunciation(word, pronunciation_id) else: if args.keep_words: if args.count_kept: num_deleted += 1 continue dictionary.delete_word(word) num_deleted += 1 dictionary.write()
<commit_before><commit_msg>Remove listed files from a pronunciation dictionary.<commit_after>#!/usr/bin/env python3 # # Reads a dictionary and a file that contains a list of words that should be # removed. Then prints the dictionary with those words excluded. import argparse import sys import codecs from pronunciationdictionary import PronunciationDictionary from filetypes import TextFileType, BinaryFileType parser = argparse.ArgumentParser() parser.add_argument('dictionary', type=BinaryFileType('r'), help='the source dictionary') parser.add_argument('wordlist', type=TextFileType('r'), help='file containing a list of words to be excluded') parser.add_argument('-c', '--count', type=int, dest='exclude_count', default=None, help="number of dictionary entries to remove (the default is all the entries in the word list)") parser.add_argument('-k', '--keep-words', action='store_true', default=False, help="leave at least one pronunciation for every word") parser.add_argument('--count-kept', action='store_true', default=False, help="when using --keep-words, count is the number of entries considered, even if they were not removed") args = parser.parse_args() words = args.wordlist.readlines() args.wordlist.close() words = [x.rstrip() for x in words] dictionary = PronunciationDictionary() dictionary.read(args.dictionary) args.dictionary.close() num_deleted = 0 for word in words: if (args.exclude_count is not None) and (num_deleted >= args.exclude_count): break # A trailing colon and number marks a pronunciation variant. colon_pos = word.rfind(':') if colon_pos > 0: pronunciation_id = int(word[colon_pos+1:]) word = word[:colon_pos] if args.keep_words: if dictionary[word].num_pronunciations() <= 1: if args.count_kept: num_deleted += 1 continue dictionary.delete_pronunciation(word, pronunciation_id) else: if args.keep_words: if args.count_kept: num_deleted += 1 continue dictionary.delete_word(word) num_deleted += 1 dictionary.write()
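The word-list convention the script parses, where a trailing ':N' marks pronunciation variant N of a word, is easy to isolate into a standalone helper. A sketch with a couple of sanity checks, independent of the PronunciationDictionary module:

```python
def parse_entry(entry):
    """Split 'word' or 'word:3' into (word, pronunciation_id or None)."""
    colon_pos = entry.rfind(':')
    if colon_pos > 0 and entry[colon_pos + 1:].isdigit():
        return entry[:colon_pos], int(entry[colon_pos + 1:])
    return entry, None

assert parse_entry('hello') == ('hello', None)
assert parse_entry('hello:2') == ('hello', 2)
assert parse_entry(':odd') == (':odd', None)  # a leading colon is not a variant marker
```

Unlike the script, the sketch also requires the suffix to be numeric before treating it as a variant id; that guard is an addition, not part of the original logic.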
197e6d0270803b8f32ae934def4cade29afab997
metpy/plots/tests/test_wx_symbols.py
metpy/plots/tests/test_wx_symbols.py
# Copyright (c) 2008-2015 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause from metpy.plots.wx_symbols import current_weather def test_mapper(): 'Test for symbol mapping functionality' assert current_weather(0) == '' assert current_weather(4) == u'\ue9a2' assert current_weather(7) == u'\ue9a5' assert current_weather(65) == u'\ue9e1' def test_alt_char(): 'Test alternate character functionality for mapper' assert current_weather.alt_char(7, 1) == u'\ue9a6' assert current_weather.alt_char(7, 2) == u'\ue9a7'
Add tests for weather symbol mapping.
Add tests for weather symbol mapping.
Python
bsd-3-clause
Unidata/MetPy,jrleeman/MetPy,ahill818/MetPy,ahaberlie/MetPy,jrleeman/MetPy,ahaberlie/MetPy,dopplershift/MetPy,Unidata/MetPy,ShawnMurd/MetPy,dopplershift/MetPy,deeplycloudy/MetPy
Add tests for weather symbol mapping.
# Copyright (c) 2008-2015 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause from metpy.plots.wx_symbols import current_weather def test_mapper(): 'Test for symbol mapping functionality' assert current_weather(0) == '' assert current_weather(4) == u'\ue9a2' assert current_weather(7) == u'\ue9a5' assert current_weather(65) == u'\ue9e1' def test_alt_char(): 'Test alternate character functionality for mapper' assert current_weather.alt_char(7, 1) == u'\ue9a6' assert current_weather.alt_char(7, 2) == u'\ue9a7'
<commit_before><commit_msg>Add tests for weather symbol mapping.<commit_after>
# Copyright (c) 2008-2015 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause from metpy.plots.wx_symbols import current_weather def test_mapper(): 'Test for symbol mapping functionality' assert current_weather(0) == '' assert current_weather(4) == u'\ue9a2' assert current_weather(7) == u'\ue9a5' assert current_weather(65) == u'\ue9e1' def test_alt_char(): 'Test alternate character functionality for mapper' assert current_weather.alt_char(7, 1) == u'\ue9a6' assert current_weather.alt_char(7, 2) == u'\ue9a7'
Add tests for weather symbol mapping.# Copyright (c) 2008-2015 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause from metpy.plots.wx_symbols import current_weather def test_mapper(): 'Test for symbol mapping functionality' assert current_weather(0) == '' assert current_weather(4) == u'\ue9a2' assert current_weather(7) == u'\ue9a5' assert current_weather(65) == u'\ue9e1' def test_alt_char(): 'Test alternate character functionality for mapper' assert current_weather.alt_char(7, 1) == u'\ue9a6' assert current_weather.alt_char(7, 2) == u'\ue9a7'
<commit_before><commit_msg>Add tests for weather symbol mapping.<commit_after># Copyright (c) 2008-2015 MetPy Developers. # Distributed under the terms of the BSD 3-Clause License. # SPDX-License-Identifier: BSD-3-Clause from metpy.plots.wx_symbols import current_weather def test_mapper(): 'Test for symbol mapping functionality' assert current_weather(0) == '' assert current_weather(4) == u'\ue9a2' assert current_weather(7) == u'\ue9a5' assert current_weather(65) == u'\ue9e1' def test_alt_char(): 'Test alternate character functionality for mapper' assert current_weather.alt_char(7, 1) == u'\ue9a6' assert current_weather.alt_char(7, 2) == u'\ue9a7'
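An illustrative mapper (not MetPy's actual implementation) that reproduces the behaviour these assertions exercise: calling the object translates a WMO weather code into a glyph from a symbol font block, and alt_char steps to neighbouring codepoints for alternate forms of the same symbol:

```python
class SymbolMapper(object):
    """Toy stand-in for the current_weather mapper; the offsets are invented."""

    def __init__(self, base, offsets):
        self.base = base        # first codepoint of the symbol block
        self.offsets = offsets  # WMO code -> offset; None means "no symbol"

    def __call__(self, code):
        off = self.offsets.get(code)
        return '' if off is None else chr(self.base + off)

    def alt_char(self, code, alt):
        return chr(ord(self(code)) + alt)

mapper = SymbolMapper(0xE9A2, {0: None, 4: 0, 7: 3})
assert mapper(0) == ''
assert mapper(4) == u'\ue9a2'
assert mapper.alt_char(7, 1) == u'\ue9a6'
```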
0e8a1ce04d230d04cb36098fe53ccd5c239bbd54
mnist/tensorflow/fgsm_adversarial.py
mnist/tensorflow/fgsm_adversarial.py
from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse import matplotlib.pyplot as pyplot import random from feedforward_ann import neural_network, SAVE_PATH def fgsm(x, y_true, y_hat, epsilon=0.075): loss = tf.nn.softmax_cross_entropy_with_logits( labels=y_true, logits=y_hat) grad, = tf.gradients(loss, x) scaled_grad = epsilon * tf.sign(grad) return tf.stop_gradient(x + scaled_grad) def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Build model x = tf.placeholder(tf.float32, [None, 784]) y_true = tf.placeholder(tf.float32, [None, 10]) y_hat, keep_prob = neural_network(x) correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, SAVE_PATH) img, label = mnist.test.images, mnist.test.labels # Create and then test our adversarial examples ae = fgsm(x, y_true, y_hat).eval(feed_dict={ x:img, y_true:label, keep_prob:1.0}) ae_logits = y_hat.eval(feed_dict={ x:ae, y_true:label, keep_prob:1.0}) # Calculate the fooling success rate fooled = tf.not_equal(tf.argmax(ae_logits, 1), tf.argmax(label, 1)) fooling_acc = tf.reduce_mean(tf.cast(fooled, tf.float32)) print('Fooling success rate: {:.2f}%'.format(fooling_acc.eval()*100)) # Display a random adversarial example (may not be successful) index = random.randint(0, len(img) - 1) pic = img[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(label, 1).eval()[index])) pyplot.show() pic = ae[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[index])) pyplot.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main)
Add simple FGSM adversarial example crafting
Add simple FGSM adversarial example crafting
Python
apache-2.0
BrandonLMorris/ml-examples
Add simple FGSM adversarial example crafting
from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse import matplotlib.pyplot as pyplot import random from feedforward_ann import neural_network, SAVE_PATH def fgsm(x, y_true, y_hat, epsilon=0.075): loss = tf.nn.softmax_cross_entropy_with_logits( labels=y_true, logits=y_hat) grad, = tf.gradients(loss, x) scaled_grad = epsilon * tf.sign(grad) return tf.stop_gradient(x + scaled_grad) def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Build model x = tf.placeholder(tf.float32, [None, 784]) y_true = tf.placeholder(tf.float32, [None, 10]) y_hat, keep_prob = neural_network(x) correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, SAVE_PATH) img, label = mnist.test.images, mnist.test.labels # Create and then test our adversarial examples ae = fgsm(x, y_true, y_hat).eval(feed_dict={ x:img, y_true:label, keep_prob:1.0}) ae_logits = y_hat.eval(feed_dict={ x:ae, y_true:label, keep_prob:1.0}) # Calculate the fooling success rate fooled = tf.not_equal(tf.argmax(ae_logits, 1), tf.argmax(label, 1)) fooling_acc = tf.reduce_mean(tf.cast(fooled, tf.float32)) print('Fooling success rate: {:.2f}%'.format(fooling_acc.eval()*100)) # Display a random adversarial example (may not be successful) index = random.randint(0, len(img) - 1) pic = img[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(label, 1).eval()[index])) pyplot.show() pic = ae[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[index])) pyplot.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main)
<commit_before><commit_msg>Add simple FGSM adversarial example crafting<commit_after>
from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse import matplotlib.pyplot as pyplot import random from feedforward_ann import neural_network, SAVE_PATH def fgsm(x, y_true, y_hat, epsilon=0.075): loss = tf.nn.softmax_cross_entropy_with_logits( labels=y_true, logits=y_hat) grad, = tf.gradients(loss, x) scaled_grad = epsilon * tf.sign(grad) return tf.stop_gradient(x + scaled_grad) def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Build model x = tf.placeholder(tf.float32, [None, 784]) y_true = tf.placeholder(tf.float32, [None, 10]) y_hat, keep_prob = neural_network(x) correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, SAVE_PATH) img, label = mnist.test.images, mnist.test.labels # Create and then test our adversarial examples ae = fgsm(x, y_true, y_hat).eval(feed_dict={ x:img, y_true:label, keep_prob:1.0}) ae_logits = y_hat.eval(feed_dict={ x:ae, y_true:label, keep_prob:1.0}) # Calculate the fooling success rate fooled = tf.not_equal(tf.argmax(ae_logits, 1), tf.argmax(label, 1)) fooling_acc = tf.reduce_mean(tf.cast(fooled, tf.float32)) print('Fooling success rate: {:.2f}%'.format(fooling_acc.eval()*100)) # Display a random adversarial example (may not be successful) index = random.randint(0, len(img) - 1) pic = img[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(label, 1).eval()[index])) pyplot.show() pic = ae[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[index])) pyplot.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main)
Add simple FGSM adversarial example craftingfrom tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse import matplotlib.pyplot as pyplot import random from feedforward_ann import neural_network, SAVE_PATH def fgsm(x, y_true, y_hat, epsilon=0.075): loss = tf.nn.softmax_cross_entropy_with_logits( labels=y_true, logits=y_hat) grad, = tf.gradients(loss, x) scaled_grad = epsilon * tf.sign(grad) return tf.stop_gradient(x + scaled_grad) def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Build model x = tf.placeholder(tf.float32, [None, 784]) y_true = tf.placeholder(tf.float32, [None, 10]) y_hat, keep_prob = neural_network(x) correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, SAVE_PATH) img, label = mnist.test.images, mnist.test.labels # Create and then test our adversarial examples ae = fgsm(x, y_true, y_hat).eval(feed_dict={ x:img, y_true:label, keep_prob:1.0}) ae_logits = y_hat.eval(feed_dict={ x:ae, y_true:label, keep_prob:1.0}) # Calculate the fooling success rate fooled = tf.not_equal(tf.argmax(ae_logits, 1), tf.argmax(label, 1)) fooling_acc = tf.reduce_mean(tf.cast(fooled, tf.float32)) print('Fooling success rate: {:.2f}%'.format(fooling_acc.eval()*100)) # Display a random adversarial example (may not be successful) index = random.randint(0, len(img) - 1) pic = img[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(label, 1).eval()[index])) pyplot.show() pic = ae[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[index])) pyplot.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main)
<commit_before><commit_msg>Add simple FGSM adversarial example crafting<commit_after>from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse import matplotlib.pyplot as pyplot import random from feedforward_ann import neural_network, SAVE_PATH def fgsm(x, y_true, y_hat, epsilon=0.075): loss = tf.nn.softmax_cross_entropy_with_logits( labels=y_true, logits=y_hat) grad, = tf.gradients(loss, x) scaled_grad = epsilon * tf.sign(grad) return tf.stop_gradient(x + scaled_grad) def main(_): mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True) # Build model x = tf.placeholder(tf.float32, [None, 784]) y_true = tf.placeholder(tf.float32, [None, 10]) y_hat, keep_prob = neural_network(x) correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_true, 1)) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) saver = tf.train.Saver() saver.restore(sess, SAVE_PATH) img, label = mnist.test.images, mnist.test.labels # Create and then test our adversarial examples ae = fgsm(x, y_true, y_hat).eval(feed_dict={ x:img, y_true:label, keep_prob:1.0}) ae_logits = y_hat.eval(feed_dict={ x:ae, y_true:label, keep_prob:1.0}) # Calculate the fooling success rate fooled = tf.not_equal(tf.argmax(ae_logits, 1), tf.argmax(label, 1)) fooling_acc = tf.reduce_mean(tf.cast(fooled, tf.float32)) print('Fooling success rate: {:.2f}%'.format(fooling_acc.eval()*100)) # Display a random adversarial example (may not be successful) index = random.randint(0, len(img) - 1) pic = img[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(label, 1).eval()[index])) pyplot.show() pic = ae[index].reshape([28, 28]) pyplot.imshow(pic, cmap='gray') pyplot.title('Classified as {}'.format(tf.argmax(ae_logits, 1).eval()[index])) pyplot.show() if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--data_dir', type=str, default='/tmp/tensorflow/mnist/input_data', help='Directory for storing input data') FLAGS, unparsed = parser.parse_known_args() tf.app.run(main=main)
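For a modern counterpart, here is the same fast gradient sign method in eager-mode TensorFlow 2; the file above targets the TF1 graph API. This is a sketch, and `model` stands for any callable that returns logits:

```python
import tensorflow as tf

def fgsm_tf2(model, x, y_true, epsilon=0.075):
    """One FGSM step: x + epsilon * sign(grad_x loss), clipped to [0, 1]."""
    x = tf.convert_to_tensor(x)
    with tf.GradientTape() as tape:
        tape.watch(x)
        loss = tf.nn.softmax_cross_entropy_with_logits(
            labels=y_true, logits=model(x))
    grad = tape.gradient(loss, x)
    return tf.clip_by_value(x + epsilon * tf.sign(grad), 0.0, 1.0)
```

The clip back into the valid pixel range is an addition over the graph version above, which returns the perturbed batch unclipped.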
d812af8535605ff963c3d3e3be1528dc615d5987
tests/test_async.py
tests/test_async.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from subliminal import Pool import os import unittest cache_dir = u'/tmp/sublicache' if not os.path.exists(cache_dir): os.mkdir(cache_dir) existing_video = u'/something/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4' class AsyncTestCase(unittest.TestCase): def test_list_subtitles(self): with Pool(4) as p: print p.list_subtitles(existing_video, languages=['en', 'fr'], cache_dir=cache_dir, max_depth=3) if __name__ == '__main__': unittest.main()
Add a unittest for async
Add a unittest for async
Python
mit
t4lwh/subliminal,hpsbranco/subliminal,neo1691/subliminal,goll/subliminal,Diaoul/subliminal,juanmhidalgo/subliminal,SickRage/subliminal,Elettronik/subliminal,nvbn/subliminal,pums974/subliminal,oxan/subliminal,fernandog/subliminal,bogdal/subliminal,ofir123/subliminal,kbkailashbagaria/subliminal,ravselj/subliminal,ratoaq2/subliminal,h3llrais3r/subliminal,getzze/subliminal
Add a unittest for async
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from subliminal import Pool import os import unittest cache_dir = u'/tmp/sublicache' if not os.path.exists(cache_dir): os.mkdir(cache_dir) existing_video = u'/something/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4' class AsyncTestCase(unittest.TestCase): def test_list_subtitles(self): with Pool(4) as p: print p.list_subtitles(existing_video, languages=['en', 'fr'], cache_dir=cache_dir, max_depth=3) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add a unittest for async<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from subliminal import Pool import os import unittest cache_dir = u'/tmp/sublicache' if not os.path.exists(cache_dir): os.mkdir(cache_dir) existing_video = u'/something/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4' class AsyncTestCase(unittest.TestCase): def test_list_subtitles(self): with Pool(4) as p: print p.list_subtitles(existing_video, languages=['en', 'fr'], cache_dir=cache_dir, max_depth=3) if __name__ == '__main__': unittest.main()
Add a unittest for async#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from subliminal import Pool import os import unittest cache_dir = u'/tmp/sublicache' if not os.path.exists(cache_dir): os.mkdir(cache_dir) existing_video = u'/something/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4' class AsyncTestCase(unittest.TestCase): def test_list_subtitles(self): with Pool(4) as p: print p.list_subtitles(existing_video, languages=['en', 'fr'], cache_dir=cache_dir, max_depth=3) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add a unittest for async<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright 2011-2012 Antoine Bertin <diaoulael@gmail.com> # # This file is part of subliminal. # # subliminal is free software; you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # subliminal is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with subliminal. If not, see <http://www.gnu.org/licenses/>. from subliminal import Pool import os import unittest cache_dir = u'/tmp/sublicache' if not os.path.exists(cache_dir): os.mkdir(cache_dir) existing_video = u'/something/The.Big.Bang.Theory.S05E18.HDTV.x264-LOL.mp4' class AsyncTestCase(unittest.TestCase): def test_list_subtitles(self): with Pool(4) as p: print p.list_subtitles(existing_video, languages=['en', 'fr'], cache_dir=cache_dir, max_depth=3) if __name__ == '__main__': unittest.main()
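The pattern the test exercises, fanning an I/O-bound lookup over a pool used as a context manager, can be sketched with the standard library alone. Here list_subtitles_for is a hypothetical stand-in for the real provider call, not part of subliminal's API:

```python
from concurrent.futures import ThreadPoolExecutor

def list_subtitles_for(path, languages):
    """Placeholder for the real network-bound subtitle lookup."""
    return {path: languages}

videos = ['/something/a.mp4', '/something/b.mp4']
with ThreadPoolExecutor(max_workers=4) as pool:
    results = list(pool.map(lambda v: list_subtitles_for(v, ['en', 'fr']),
                            videos))
print(results)
```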
03188fe77f895b918dbbf3903a5eea8eb5256cd0
evaluation/packages/primitive.py
evaluation/packages/primitive.py
"""@package Primitive This module is the Python counterpart of the C++ LinePrimitive and and PlanePrimitive classes See C++ GlobOpt project for more details on Primitives """ import numpy as np class Primitive(object): def __init__(self, uid, did, pos=np.zeros(3), normal=np.array([ 1., 0., 0.]).T ): self.uid = uid self.did = did self.pos = pos self.normal = normal def distanceTo(self, pos) : return np.dot(pos-self.pos, self.normal) def readPrimitivesFromFile(path): f = open(path, 'r') primitives = [] for line in f: if line[0] != '#': seqline = line.split(',') pos = np.array([float(seqline[0]), float(seqline[1]), float(seqline[2])]).T normal = np.array([float(seqline[3]), float(seqline[4]), float(seqline[5])]).T uid = seqline[6] did = seqline[7] primitives.append(Primitive( uid, did, pos, normal )) return primitives
Add new Primitive class + loading function to load Primitive arrays from file
Add new Primitive class + loading function to load Primitive arrays from file
Python
apache-2.0
amonszpart/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,amonszpart/globOpt,amonszpart/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt,NUAAXXY/globOpt
Add new Primitive class + loading function to load Primitive arrays from file
"""@package Primitive This module is the Python counterpart of the C++ LinePrimitive and and PlanePrimitive classes See C++ GlobOpt project for more details on Primitives """ import numpy as np class Primitive(object): def __init__(self, uid, did, pos=np.zeros(3), normal=np.array([ 1., 0., 0.]).T ): self.uid = uid self.did = did self.pos = pos self.normal = normal def distanceTo(self, pos) : return np.dot(pos-self.pos, self.normal) def readPrimitivesFromFile(path): f = open(path, 'r') primitives = [] for line in f: if line[0] != '#': seqline = line.split(',') pos = np.array([float(seqline[0]), float(seqline[1]), float(seqline[2])]).T normal = np.array([float(seqline[3]), float(seqline[4]), float(seqline[5])]).T uid = seqline[6] did = seqline[7] primitives.append(Primitive( uid, did, pos, normal )) return primitives
<commit_before><commit_msg>Add new Primitive class + loading function to load Primitive arrays from file<commit_after>
"""@package Primitive This module is the Python counterpart of the C++ LinePrimitive and and PlanePrimitive classes See C++ GlobOpt project for more details on Primitives """ import numpy as np class Primitive(object): def __init__(self, uid, did, pos=np.zeros(3), normal=np.array([ 1., 0., 0.]).T ): self.uid = uid self.did = did self.pos = pos self.normal = normal def distanceTo(self, pos) : return np.dot(pos-self.pos, self.normal) def readPrimitivesFromFile(path): f = open(path, 'r') primitives = [] for line in f: if line[0] != '#': seqline = line.split(',') pos = np.array([float(seqline[0]), float(seqline[1]), float(seqline[2])]).T normal = np.array([float(seqline[3]), float(seqline[4]), float(seqline[5])]).T uid = seqline[6] did = seqline[7] primitives.append(Primitive( uid, did, pos, normal )) return primitives
Add new Primitive class + loading function to load Primitive arrays from file"""@package Primitive This module is the Python counterpart of the C++ LinePrimitive and and PlanePrimitive classes See C++ GlobOpt project for more details on Primitives """ import numpy as np class Primitive(object): def __init__(self, uid, did, pos=np.zeros(3), normal=np.array([ 1., 0., 0.]).T ): self.uid = uid self.did = did self.pos = pos self.normal = normal def distanceTo(self, pos) : return np.dot(pos-self.pos, self.normal) def readPrimitivesFromFile(path): f = open(path, 'r') primitives = [] for line in f: if line[0] != '#': seqline = line.split(',') pos = np.array([float(seqline[0]), float(seqline[1]), float(seqline[2])]).T normal = np.array([float(seqline[3]), float(seqline[4]), float(seqline[5])]).T uid = seqline[6] did = seqline[7] primitives.append(Primitive( uid, did, pos, normal )) return primitives
<commit_before><commit_msg>Add new Primitive class + loading function to load Primitive arrays from file<commit_after>"""@package Primitive This module is the Python counterpart of the C++ LinePrimitive and and PlanePrimitive classes See C++ GlobOpt project for more details on Primitives """ import numpy as np class Primitive(object): def __init__(self, uid, did, pos=np.zeros(3), normal=np.array([ 1., 0., 0.]).T ): self.uid = uid self.did = did self.pos = pos self.normal = normal def distanceTo(self, pos) : return np.dot(pos-self.pos, self.normal) def readPrimitivesFromFile(path): f = open(path, 'r') primitives = [] for line in f: if line[0] != '#': seqline = line.split(',') pos = np.array([float(seqline[0]), float(seqline[1]), float(seqline[2])]).T normal = np.array([float(seqline[3]), float(seqline[4]), float(seqline[5])]).T uid = seqline[6] did = seqline[7] primitives.append(Primitive( uid, did, pos, normal )) return primitives
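A quick usage sketch for the Primitive module above. The import path, file path, and uid/did values here are made-up stand-ins; it assumes the module is importable (e.g. as packages.primitive from the evaluation directory):

import numpy as np

# Hypothetical import path for the module defined in the record above.
from packages.primitive import Primitive, readPrimitivesFromFile

# distanceTo() is a signed point-to-plane distance along the normal.
plane = Primitive(uid='4', did='2',
                  pos=np.zeros(3),
                  normal=np.array([0., 0., 1.]).T)
print(plane.distanceTo(np.array([1., 2., 3.])))  # -> 3.0

# readPrimitivesFromFile() expects one comma-separated record per
# non-comment line: px,py,pz,nx,ny,nz,uid,did
with open('/tmp/prims.csv', 'w') as f:
    f.write('# px,py,pz,nx,ny,nz,uid,did\n')
    f.write('0,0,0,0,0,1,4,2\n')

prims = readPrimitivesFromFile('/tmp/prims.csv')
print(prims[0].uid)  # -> '4'
# Note: did is taken from seqline[7] as written, so it keeps the
# trailing newline ('2\n') unless the caller strips it.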
139db56d67ff7f0e1761c303e0074d713f25d646
gpi/Poisson_GPI.py
gpi/Poisson_GPI.py
# Author: Ashley Anderson III <aganders3@gmail.com> # Date: 2015-10-10 19:37 # Copyright (C) 2015 Dignity Health from __future__ import absolute_import, division, print_function, unicode_literals import os # gpi, future import gpi from bart.gpi.borg import IFilePath, OFilePath, Command # bart import bart base_path = bart.__path__[0] # library base for executables import bart.python.cfl as cfl class ExternalNode(gpi.NodeAPI): '''Usage: poisson [-Y/Z dim] [-y/z acc] [-v] [-e] [-C center] <outfile> Computes Poisson-disc sampling pattern. -Y size dimension 1 (phase 1) -Z size dimension 2 (phase 2) -y acceleration (dim 1) -z acceleration (dim 2) -C size of calibration region -v variable density -e elliptical scanning ''' def initUI(self): # Widgets self.addWidget('SpinBox', 'y size', min=3) self.addWidget('SpinBox', 'z size', min=3) self.addWidget('SpinBox', 'y accel', min=1, val=2) self.addWidget('SpinBox', 'z accel', min=1, val=2) self.addWidget('SpinBox', 'cal size', min=0, val=0) self.addWidget('PushButton', 'variable density', toggle=True) self.addWidget('PushButton', 'elliptical', toggle=True) # IO Ports self.addOutPort('out', 'NPYarray') return 0 def validate(self): if (self.getVal('cal size') > self.getVal('y size') or self.getVal('cal size') > self.getVal('y size')): return -1 return 0 def compute(self): # load up arguments list args = [base_path+'/poisson'] args += ['-Y {}'.format(self.getVal('y size'))] args += ['-Z {}'.format(self.getVal('z size'))] args += ['-y {}'.format(self.getVal('y accel'))] args += ['-z {}'.format(self.getVal('z accel'))] args += ['-C {}'.format(self.getVal('cal size'))] if self.getVal('variable density'): args += ['-v'] if self.getVal('elliptical'): args += ['-e'] # setup file for getting data from external command out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr']) args += [out] # run commandline print(Command(*args)) self.setData('out', out.data()) out.close() return 0
Add Poisson node - generates Poisson-disc sampling pattern
Add Poisson node - generates Poisson-disc sampling pattern
Python
bsd-3-clause
nckz/bart,nckz/bart,nckz/bart,nckz/bart,nckz/bart
Add Poisson node - generates Poisson-disc sampling pattern
# Author: Ashley Anderson III <aganders3@gmail.com> # Date: 2015-10-10 19:37 # Copyright (C) 2015 Dignity Health from __future__ import absolute_import, division, print_function, unicode_literals import os # gpi, future import gpi from bart.gpi.borg import IFilePath, OFilePath, Command # bart import bart base_path = bart.__path__[0] # library base for executables import bart.python.cfl as cfl class ExternalNode(gpi.NodeAPI): '''Usage: poisson [-Y/Z dim] [-y/z acc] [-v] [-e] [-C center] <outfile> Computes Poisson-disc sampling pattern. -Y size dimension 1 (phase 1) -Z size dimension 2 (phase 2) -y acceleration (dim 1) -z acceleration (dim 2) -C size of calibration region -v variable density -e elliptical scanning ''' def initUI(self): # Widgets self.addWidget('SpinBox', 'y size', min=3) self.addWidget('SpinBox', 'z size', min=3) self.addWidget('SpinBox', 'y accel', min=1, val=2) self.addWidget('SpinBox', 'z accel', min=1, val=2) self.addWidget('SpinBox', 'cal size', min=0, val=0) self.addWidget('PushButton', 'variable density', toggle=True) self.addWidget('PushButton', 'elliptical', toggle=True) # IO Ports self.addOutPort('out', 'NPYarray') return 0 def validate(self): if (self.getVal('cal size') > self.getVal('y size') or self.getVal('cal size') > self.getVal('y size')): return -1 return 0 def compute(self): # load up arguments list args = [base_path+'/poisson'] args += ['-Y {}'.format(self.getVal('y size'))] args += ['-Z {}'.format(self.getVal('z size'))] args += ['-y {}'.format(self.getVal('y accel'))] args += ['-z {}'.format(self.getVal('z accel'))] args += ['-C {}'.format(self.getVal('cal size'))] if self.getVal('variable density'): args += ['-v'] if self.getVal('elliptical'): args += ['-e'] # setup file for getting data from external command out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr']) args += [out] # run commandline print(Command(*args)) self.setData('out', out.data()) out.close() return 0
<commit_before><commit_msg>Add Poisson node - generates Poisson-disc sampling pattern<commit_after>
# Author: Ashley Anderson III <aganders3@gmail.com> # Date: 2015-10-10 19:37 # Copyright (C) 2015 Dignity Health from __future__ import absolute_import, division, print_function, unicode_literals import os # gpi, future import gpi from bart.gpi.borg import IFilePath, OFilePath, Command # bart import bart base_path = bart.__path__[0] # library base for executables import bart.python.cfl as cfl class ExternalNode(gpi.NodeAPI): '''Usage: poisson [-Y/Z dim] [-y/z acc] [-v] [-e] [-C center] <outfile> Computes Poisson-disc sampling pattern. -Y size dimension 1 (phase 1) -Z size dimension 2 (phase 2) -y acceleration (dim 1) -z acceleration (dim 2) -C size of calibration region -v variable density -e elliptical scanning ''' def initUI(self): # Widgets self.addWidget('SpinBox', 'y size', min=3) self.addWidget('SpinBox', 'z size', min=3) self.addWidget('SpinBox', 'y accel', min=1, val=2) self.addWidget('SpinBox', 'z accel', min=1, val=2) self.addWidget('SpinBox', 'cal size', min=0, val=0) self.addWidget('PushButton', 'variable density', toggle=True) self.addWidget('PushButton', 'elliptical', toggle=True) # IO Ports self.addOutPort('out', 'NPYarray') return 0 def validate(self): if (self.getVal('cal size') > self.getVal('y size') or self.getVal('cal size') > self.getVal('y size')): return -1 return 0 def compute(self): # load up arguments list args = [base_path+'/poisson'] args += ['-Y {}'.format(self.getVal('y size'))] args += ['-Z {}'.format(self.getVal('z size'))] args += ['-y {}'.format(self.getVal('y accel'))] args += ['-z {}'.format(self.getVal('z accel'))] args += ['-C {}'.format(self.getVal('cal size'))] if self.getVal('variable density'): args += ['-v'] if self.getVal('elliptical'): args += ['-e'] # setup file for getting data from external command out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr']) args += [out] # run commandline print(Command(*args)) self.setData('out', out.data()) out.close() return 0
Add Poisson node - generates Poisson-disc sampling pattern# Author: Ashley Anderson III <aganders3@gmail.com>
# Date: 2015-10-10 19:37
# Copyright (C) 2015 Dignity Health

from __future__ import absolute_import, division, print_function, unicode_literals

import os

# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command

# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl

class ExternalNode(gpi.NodeAPI):
    '''Usage: poisson [-Y/Z dim] [-y/z acc] [-v] [-e] [-C center] <outfile>

    Computes Poisson-disc sampling pattern.

    -Y size dimension 1 (phase 1)
    -Z size dimension 2 (phase 2)
    -y acceleration (dim 1)
    -z acceleration (dim 2)
    -C size of calibration region
    -v variable density
    -e elliptical scanning
    '''

    def initUI(self):
        # Widgets
        self.addWidget('SpinBox', 'y size', min=3)
        self.addWidget('SpinBox', 'z size', min=3)
        self.addWidget('SpinBox', 'y accel', min=1, val=2)
        self.addWidget('SpinBox', 'z accel', min=1, val=2)
        self.addWidget('SpinBox', 'cal size', min=0, val=0)
        self.addWidget('PushButton', 'variable density', toggle=True)
        self.addWidget('PushButton', 'elliptical', toggle=True)

        # IO Ports
        self.addOutPort('out', 'NPYarray')

        return 0

    def validate(self):
        if (self.getVal('cal size') > self.getVal('y size') or
            self.getVal('cal size') > self.getVal('y size')):
            return -1
        return 0

    def compute(self):
        # load up arguments list
        args = [base_path+'/poisson']
        args += ['-Y {}'.format(self.getVal('y size'))]
        args += ['-Z {}'.format(self.getVal('z size'))]
        args += ['-y {}'.format(self.getVal('y accel'))]
        args += ['-z {}'.format(self.getVal('z accel'))]
        args += ['-C {}'.format(self.getVal('cal size'))]
        if self.getVal('variable density'):
            args += ['-v']
        if self.getVal('elliptical'):
            args += ['-e']

        # setup file for getting data from external command
        out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
        args += [out]

        # run commandline
        print(Command(*args))

        self.setData('out', out.data())

        out.close()

        return 0
<commit_before><commit_msg>Add Poisson node - generates Poisson-disc sampling pattern<commit_after># Author: Ashley Anderson III <aganders3@gmail.com>
# Date: 2015-10-10 19:37
# Copyright (C) 2015 Dignity Health

from __future__ import absolute_import, division, print_function, unicode_literals

import os

# gpi, future
import gpi
from bart.gpi.borg import IFilePath, OFilePath, Command

# bart
import bart
base_path = bart.__path__[0] # library base for executables
import bart.python.cfl as cfl

class ExternalNode(gpi.NodeAPI):
    '''Usage: poisson [-Y/Z dim] [-y/z acc] [-v] [-e] [-C center] <outfile>

    Computes Poisson-disc sampling pattern.

    -Y size dimension 1 (phase 1)
    -Z size dimension 2 (phase 2)
    -y acceleration (dim 1)
    -z acceleration (dim 2)
    -C size of calibration region
    -v variable density
    -e elliptical scanning
    '''

    def initUI(self):
        # Widgets
        self.addWidget('SpinBox', 'y size', min=3)
        self.addWidget('SpinBox', 'z size', min=3)
        self.addWidget('SpinBox', 'y accel', min=1, val=2)
        self.addWidget('SpinBox', 'z accel', min=1, val=2)
        self.addWidget('SpinBox', 'cal size', min=0, val=0)
        self.addWidget('PushButton', 'variable density', toggle=True)
        self.addWidget('PushButton', 'elliptical', toggle=True)

        # IO Ports
        self.addOutPort('out', 'NPYarray')

        return 0

    def validate(self):
        if (self.getVal('cal size') > self.getVal('y size') or
            self.getVal('cal size') > self.getVal('y size')):
            return -1
        return 0

    def compute(self):
        # load up arguments list
        args = [base_path+'/poisson']
        args += ['-Y {}'.format(self.getVal('y size'))]
        args += ['-Z {}'.format(self.getVal('z size'))]
        args += ['-y {}'.format(self.getVal('y accel'))]
        args += ['-z {}'.format(self.getVal('z accel'))]
        args += ['-C {}'.format(self.getVal('cal size'))]
        if self.getVal('variable density'):
            args += ['-v']
        if self.getVal('elliptical'):
            args += ['-e']

        # setup file for getting data from external command
        out = OFilePath(cfl.readcfl, asuffix=['.cfl','.hdr'])
        args += [out]

        # run commandline
        print(Command(*args))

        self.setData('out', out.data())

        out.close()

        return 0
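Two notes on the node in this record. First, validate() compares 'cal size' against 'y size' twice; the second comparison was presumably meant to read 'z size'. Second, compute() only assembles a flat argument list for BART's external poisson tool. A standalone sketch of that assembly — base_path, the widget values, and the output path here are hypothetical stand-ins, and the real node routes this through Command/OFilePath rather than printing it:

# Standalone illustration of the argument assembly in compute() above.
def build_poisson_args(base_path, vals, out='/tmp/pattern'):
    args = [base_path + '/poisson',
            '-Y {}'.format(vals['y size']),
            '-Z {}'.format(vals['z size']),
            '-y {}'.format(vals['y accel']),
            '-z {}'.format(vals['z accel']),
            '-C {}'.format(vals['cal size'])]
    if vals['variable density']:
        args.append('-v')
    if vals['elliptical']:
        args.append('-e')
    args.append(out)
    return args

demo_vals = {'y size': 64, 'z size': 64, 'y accel': 2, 'z accel': 2,
             'cal size': 12, 'variable density': True, 'elliptical': False}
print(' '.join(build_poisson_args('/opt/bart', demo_vals)))
# -> /opt/bart/poisson -Y 64 -Z 64 -y 2 -z 2 -C 12 -v /tmp/pattern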
393309ed43fbf852a698fe7dcd763d282a761590
src/pluggy/_result.py
src/pluggy/_result.py
""" Hook wrapper "result" utilities. """ import sys import warnings _py3 = sys.version_info > (3, 0) if not _py3: exec( """ def _reraise(cls, val, tb): raise cls, val, tb """ ) def _raise_wrapfail(wrap_controller, msg): co = wrap_controller.gi_code raise RuntimeError( "wrap_controller at %r %s:%d %s" % (co.co_name, co.co_filename, co.co_firstlineno, msg) ) class HookCallError(Exception): """ Hook was called wrongly. """ class _Result(object): def __init__(self, result, excinfo): self._result = result self._excinfo = excinfo @property def excinfo(self): return self._excinfo @property def result(self): """Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).""" msg = "Use get_result() which forces correct exception handling" warnings.warn(DeprecationWarning(msg), stacklevel=2) return self._result @classmethod def from_call(cls, func): __tracebackhide__ = True result = excinfo = None try: result = func() except BaseException: excinfo = sys.exc_info() return cls(result, excinfo) def force_result(self, result): """Force the result(s) to ``result``. If the hook was marked as a ``firstresult`` a single value should be set otherwise set a (modified) list of results. Any exceptions found during invocation will be deleted. """ self._result = result self._excinfo = None def get_result(self): """Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results. """ __tracebackhide__ = True if self._excinfo is None: return self._result else: ex = self._excinfo if _py3: raise ex[1].with_traceback(ex[2]) _reraise(*ex) # noqa
Move wrapper "result" types into new module
Move wrapper "result" types into new module
Python
mit
RonnyPfannschmidt/pluggy,RonnyPfannschmidt/pluggy,hpk42/pluggy,pytest-dev/pluggy,pytest-dev/pluggy
Move wrapper "result" types into new module
""" Hook wrapper "result" utilities. """ import sys import warnings _py3 = sys.version_info > (3, 0) if not _py3: exec( """ def _reraise(cls, val, tb): raise cls, val, tb """ ) def _raise_wrapfail(wrap_controller, msg): co = wrap_controller.gi_code raise RuntimeError( "wrap_controller at %r %s:%d %s" % (co.co_name, co.co_filename, co.co_firstlineno, msg) ) class HookCallError(Exception): """ Hook was called wrongly. """ class _Result(object): def __init__(self, result, excinfo): self._result = result self._excinfo = excinfo @property def excinfo(self): return self._excinfo @property def result(self): """Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).""" msg = "Use get_result() which forces correct exception handling" warnings.warn(DeprecationWarning(msg), stacklevel=2) return self._result @classmethod def from_call(cls, func): __tracebackhide__ = True result = excinfo = None try: result = func() except BaseException: excinfo = sys.exc_info() return cls(result, excinfo) def force_result(self, result): """Force the result(s) to ``result``. If the hook was marked as a ``firstresult`` a single value should be set otherwise set a (modified) list of results. Any exceptions found during invocation will be deleted. """ self._result = result self._excinfo = None def get_result(self): """Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results. """ __tracebackhide__ = True if self._excinfo is None: return self._result else: ex = self._excinfo if _py3: raise ex[1].with_traceback(ex[2]) _reraise(*ex) # noqa
<commit_before><commit_msg>Move wrapper "result" types into new module<commit_after>
""" Hook wrapper "result" utilities. """ import sys import warnings _py3 = sys.version_info > (3, 0) if not _py3: exec( """ def _reraise(cls, val, tb): raise cls, val, tb """ ) def _raise_wrapfail(wrap_controller, msg): co = wrap_controller.gi_code raise RuntimeError( "wrap_controller at %r %s:%d %s" % (co.co_name, co.co_filename, co.co_firstlineno, msg) ) class HookCallError(Exception): """ Hook was called wrongly. """ class _Result(object): def __init__(self, result, excinfo): self._result = result self._excinfo = excinfo @property def excinfo(self): return self._excinfo @property def result(self): """Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).""" msg = "Use get_result() which forces correct exception handling" warnings.warn(DeprecationWarning(msg), stacklevel=2) return self._result @classmethod def from_call(cls, func): __tracebackhide__ = True result = excinfo = None try: result = func() except BaseException: excinfo = sys.exc_info() return cls(result, excinfo) def force_result(self, result): """Force the result(s) to ``result``. If the hook was marked as a ``firstresult`` a single value should be set otherwise set a (modified) list of results. Any exceptions found during invocation will be deleted. """ self._result = result self._excinfo = None def get_result(self): """Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results. """ __tracebackhide__ = True if self._excinfo is None: return self._result else: ex = self._excinfo if _py3: raise ex[1].with_traceback(ex[2]) _reraise(*ex) # noqa
Move wrapper "result" types into new module""" Hook wrapper "result" utilities. """ import sys import warnings _py3 = sys.version_info > (3, 0) if not _py3: exec( """ def _reraise(cls, val, tb): raise cls, val, tb """ ) def _raise_wrapfail(wrap_controller, msg): co = wrap_controller.gi_code raise RuntimeError( "wrap_controller at %r %s:%d %s" % (co.co_name, co.co_filename, co.co_firstlineno, msg) ) class HookCallError(Exception): """ Hook was called wrongly. """ class _Result(object): def __init__(self, result, excinfo): self._result = result self._excinfo = excinfo @property def excinfo(self): return self._excinfo @property def result(self): """Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).""" msg = "Use get_result() which forces correct exception handling" warnings.warn(DeprecationWarning(msg), stacklevel=2) return self._result @classmethod def from_call(cls, func): __tracebackhide__ = True result = excinfo = None try: result = func() except BaseException: excinfo = sys.exc_info() return cls(result, excinfo) def force_result(self, result): """Force the result(s) to ``result``. If the hook was marked as a ``firstresult`` a single value should be set otherwise set a (modified) list of results. Any exceptions found during invocation will be deleted. """ self._result = result self._excinfo = None def get_result(self): """Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results. """ __tracebackhide__ = True if self._excinfo is None: return self._result else: ex = self._excinfo if _py3: raise ex[1].with_traceback(ex[2]) _reraise(*ex) # noqa
<commit_before><commit_msg>Move wrapper "result" types into new module<commit_after>""" Hook wrapper "result" utilities. """ import sys import warnings _py3 = sys.version_info > (3, 0) if not _py3: exec( """ def _reraise(cls, val, tb): raise cls, val, tb """ ) def _raise_wrapfail(wrap_controller, msg): co = wrap_controller.gi_code raise RuntimeError( "wrap_controller at %r %s:%d %s" % (co.co_name, co.co_filename, co.co_firstlineno, msg) ) class HookCallError(Exception): """ Hook was called wrongly. """ class _Result(object): def __init__(self, result, excinfo): self._result = result self._excinfo = excinfo @property def excinfo(self): return self._excinfo @property def result(self): """Get the result(s) for this hook call (DEPRECATED in favor of ``get_result()``).""" msg = "Use get_result() which forces correct exception handling" warnings.warn(DeprecationWarning(msg), stacklevel=2) return self._result @classmethod def from_call(cls, func): __tracebackhide__ = True result = excinfo = None try: result = func() except BaseException: excinfo = sys.exc_info() return cls(result, excinfo) def force_result(self, result): """Force the result(s) to ``result``. If the hook was marked as a ``firstresult`` a single value should be set otherwise set a (modified) list of results. Any exceptions found during invocation will be deleted. """ self._result = result self._excinfo = None def get_result(self): """Get the result(s) for this hook call. If the hook was marked as a ``firstresult`` only a single value will be returned otherwise a list of results. """ __tracebackhide__ = True if self._excinfo is None: return self._result else: ex = self._excinfo if _py3: raise ex[1].with_traceback(ex[2]) _reraise(*ex) # noqa
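A short round-trip sketch of the _Result API in this record — capture a call, optionally override the outcome the way a hook wrapper would, and let get_result() re-raise. It assumes the module is importable as pluggy._result, matching the new file path above:

from pluggy._result import _Result

# A successful call: the return value is stored and handed back.
ok = _Result.from_call(lambda: 42)
assert ok.get_result() == 42

# A failing call: the exception is captured via sys.exc_info(), not raised.
failed = _Result.from_call(lambda: 1 / 0)
assert failed.excinfo[0] is ZeroDivisionError

# A hook wrapper may override the outcome; the stored exception is cleared.
failed.force_result('recovered')
assert failed.get_result() == 'recovered'

# Without force_result(), get_result() re-raises the original exception.
try:
    _Result.from_call(lambda: 1 / 0).get_result()
except ZeroDivisionError:
    print('re-raised as expected')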
50e588ff368ce3ccd269f432ee543bd022a4ab22
polyaxon/auditor/events/superuser.py
polyaxon/auditor/events/superuser.py
import auditor from libs.event_manager import event_types from libs.event_manager.event import Event class SuperUserRoleGrantedEvent(Event): type = event_types.SUPERUSER_ROLE_GRANTED class SuperUserRoleRevokedEvent(Event): type = event_types.SUPERUSER_ROLE_REVOKED auditor.register(SuperUserRoleGrantedEvent) auditor.register(SuperUserRoleRevokedEvent)
Add auditor super user's events
Add auditor super user's events
Python
apache-2.0
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
Add auditor super user's events
import auditor from libs.event_manager import event_types from libs.event_manager.event import Event class SuperUserRoleGrantedEvent(Event): type = event_types.SUPERUSER_ROLE_GRANTED class SuperUserRoleRevokedEvent(Event): type = event_types.SUPERUSER_ROLE_REVOKED auditor.register(SuperUserRoleGrantedEvent) auditor.register(SuperUserRoleRevokedEvent)
<commit_before><commit_msg>Add auditor super user's events<commit_after>
import auditor from libs.event_manager import event_types from libs.event_manager.event import Event class SuperUserRoleGrantedEvent(Event): type = event_types.SUPERUSER_ROLE_GRANTED class SuperUserRoleRevokedEvent(Event): type = event_types.SUPERUSER_ROLE_REVOKED auditor.register(SuperUserRoleGrantedEvent) auditor.register(SuperUserRoleRevokedEvent)
Add auditor super user's eventsimport auditor from libs.event_manager import event_types from libs.event_manager.event import Event class SuperUserRoleGrantedEvent(Event): type = event_types.SUPERUSER_ROLE_GRANTED class SuperUserRoleRevokedEvent(Event): type = event_types.SUPERUSER_ROLE_REVOKED auditor.register(SuperUserRoleGrantedEvent) auditor.register(SuperUserRoleRevokedEvent)
<commit_before><commit_msg>Add auditor super user's events<commit_after>import auditor from libs.event_manager import event_types from libs.event_manager.event import Event class SuperUserRoleGrantedEvent(Event): type = event_types.SUPERUSER_ROLE_GRANTED class SuperUserRoleRevokedEvent(Event): type = event_types.SUPERUSER_ROLE_REVOKED auditor.register(SuperUserRoleGrantedEvent) auditor.register(SuperUserRoleRevokedEvent)
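The module in this record only declares two Event subclasses and registers them; the registry itself lives in the auditor package. A minimal, self-contained sketch of the register/record pattern these classes plug into — purely illustrative, not polyaxon's actual auditor implementation, and the constant value is hypothetical:

# Toy event registry, sketching the pattern behind auditor.register() above.
class Event(object):
    type = None


_registry = {}


def register(event_cls):
    _registry[event_cls.type] = event_cls


def record(event_type, **context):
    event_cls = _registry[event_type]
    print('audit event {}: {}'.format(event_cls.type, context))


SUPERUSER_ROLE_GRANTED = 'superuser.role.granted'  # hypothetical constant value


class SuperUserRoleGrantedEvent(Event):
    type = SUPERUSER_ROLE_GRANTED


register(SuperUserRoleGrantedEvent)
record(SUPERUSER_ROLE_GRANTED, actor='admin', target_user='user-42')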
1d7660282be42d96d912855eeb3bd4dfe830f224
bitvault/test/scripts/client_usage.py
bitvault/test/scripts/client_usage.py
import time import bitvault current_milli_time = lambda: int(round(time.time())) email = '{0}@bitvault.io'.format(current_milli_time()) password = u'incredibly_secure' ## API discovery # # The BitVault server provides a JSON description of its API that allows # the client to generate all necessary resource classes at runtime. # FIXME: This is from high_level.py; why aren't we just calling authed_client? client = bitvault.client(u'http://localhost:8998') ## User management # # The create action returns a User Resource which has: # # * action methods (get, update, reset) # * attributes (email, first_name, etc.) # * associated resources (applications) client.users.create(email=email, password=password) # Get an authenticated client representing the new user client = bitvault.authed_client(email=email, password=password) user = client.user ## Application management ## Fetch applications # # If the applications collection is not populated, it will be fetched from the # server. The cached version will be used if it is already loaded. A refresh can # be triggered by passing it as an option to the action. user.applications user.applications.refresh() ## Create an application. # # The optional callback_url attribute specifies a URL where BitVault # can POST event information such as confirmed transactions. app = user.applications.create( name=u'bitcoin_app', callback_url=u'https://someapp.com/callback') ## Wallets # # Wallets belong to applications, not directly to users. They require # a passphrase to be provided on creation. wallet = app.wallets.create(passphrase=u'very insecure', name=u'my funds') # An application's wallet collection is enumerable for wallet in app.wallets.values(): print(wallet) # And acts as a hash with names as keys wallet = app.wallets[u'my funds'] # The passphrase is required to unlock the wallet before you can # perform any transactions with it. wallet.unlock(u'very insecure') ## Accounts # # Wallets can have multiple accounts, each represented by a path in the # MultiWallet's deterministic trees. account = wallet.accounts.create(name=u'office supplies') ## Payments # # Sending payments # Creating addresses for receiving payments # This is a BIP 16 "Pay to Script Hash" address, where the script in question # is a BIP 11 "multisig". payment_address = account.addresses.create # TODO: Additional method "prepare" to obtain unsigned transaction for inspection payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},)) ## Transfers account_1 = wallet.accounts[u'rubber bands'] account_2 = wallet.accounts.create(name=u'travel expenses') wallet.transfer(amount=10000, source=account_1, destination=account_2)
Add high-level client usage script
Add high-level client usage script
Python
mit
GemHQ/round-py
Add high-level client usage script
import time import bitvault current_milli_time = lambda: int(round(time.time())) email = '{0}@bitvault.io'.format(current_milli_time()) password = u'incredibly_secure' ## API discovery # # The BitVault server provides a JSON description of its API that allows # the client to generate all necessary resource classes at runtime. # FIXME: This is from high_level.py; why aren't we just calling authed_client? client = bitvault.client(u'http://localhost:8998') ## User management # # The create action returns a User Resource which has: # # * action methods (get, update, reset) # * attributes (email, first_name, etc.) # * associated resources (applications) client.users.create(email=email, password=password) # Get an authenticated client representing the new user client = bitvault.authed_client(email=email, password=password) user = client.user ## Application management ## Fetch applications # # If the applications collection is not populated, it will be fetched from the # server. The cached version will be used if it is already loaded. A refresh can # be triggered by passing it as an option to the action. user.applications user.applications.refresh() ## Create an application. # # The optional callback_url attribute specifies a URL where BitVault # can POST event information such as confirmed transactions. app = user.applications.create( name=u'bitcoin_app', callback_url=u'https://someapp.com/callback') ## Wallets # # Wallets belong to applications, not directly to users. They require # a passphrase to be provided on creation. wallet = app.wallets.create(passphrase=u'very insecure', name=u'my funds') # An application's wallet collection is enumerable for wallet in app.wallets.values(): print(wallet) # And acts as a hash with names as keys wallet = app.wallets[u'my funds'] # The passphrase is required to unlock the wallet before you can # perform any transactions with it. wallet.unlock(u'very insecure') ## Accounts # # Wallets can have multiple accounts, each represented by a path in the # MultiWallet's deterministic trees. account = wallet.accounts.create(name=u'office supplies') ## Payments # # Sending payments # Creating addresses for receiving payments # This is a BIP 16 "Pay to Script Hash" address, where the script in question # is a BIP 11 "multisig". payment_address = account.addresses.create # TODO: Additional method "prepare" to obtain unsigned transaction for inspection payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},)) ## Transfers account_1 = wallet.accounts[u'rubber bands'] account_2 = wallet.accounts.create(name=u'travel expenses') wallet.transfer(amount=10000, source=account_1, destination=account_2)
<commit_before><commit_msg>Add high-level client usage script<commit_after>
import time import bitvault current_milli_time = lambda: int(round(time.time())) email = '{0}@bitvault.io'.format(current_milli_time()) password = u'incredibly_secure' ## API discovery # # The BitVault server provides a JSON description of its API that allows # the client to generate all necessary resource classes at runtime. # FIXME: This is from high_level.py; why aren't we just calling authed_client? client = bitvault.client(u'http://localhost:8998') ## User management # # The create action returns a User Resource which has: # # * action methods (get, update, reset) # * attributes (email, first_name, etc.) # * associated resources (applications) client.users.create(email=email, password=password) # Get an authenticated client representing the new user client = bitvault.authed_client(email=email, password=password) user = client.user ## Application management ## Fetch applications # # If the applications collection is not populated, it will be fetched from the # server. The cached version will be used if it is already loaded. A refresh can # be triggered by passing it as an option to the action. user.applications user.applications.refresh() ## Create an application. # # The optional callback_url attribute specifies a URL where BitVault # can POST event information such as confirmed transactions. app = user.applications.create( name=u'bitcoin_app', callback_url=u'https://someapp.com/callback') ## Wallets # # Wallets belong to applications, not directly to users. They require # a passphrase to be provided on creation. wallet = app.wallets.create(passphrase=u'very insecure', name=u'my funds') # An application's wallet collection is enumerable for wallet in app.wallets.values(): print(wallet) # And acts as a hash with names as keys wallet = app.wallets[u'my funds'] # The passphrase is required to unlock the wallet before you can # perform any transactions with it. wallet.unlock(u'very insecure') ## Accounts # # Wallets can have multiple accounts, each represented by a path in the # MultiWallet's deterministic trees. account = wallet.accounts.create(name=u'office supplies') ## Payments # # Sending payments # Creating addresses for receiving payments # This is a BIP 16 "Pay to Script Hash" address, where the script in question # is a BIP 11 "multisig". payment_address = account.addresses.create # TODO: Additional method "prepare" to obtain unsigned transaction for inspection payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},)) ## Transfers account_1 = wallet.accounts[u'rubber bands'] account_2 = wallet.accounts.create(name=u'travel expenses') wallet.transfer(amount=10000, source=account_1, destination=account_2)
Add high-level client usage scriptimport time import bitvault current_milli_time = lambda: int(round(time.time())) email = '{0}@bitvault.io'.format(current_milli_time()) password = u'incredibly_secure' ## API discovery # # The BitVault server provides a JSON description of its API that allows # the client to generate all necessary resource classes at runtime. # FIXME: This is from high_level.py; why aren't we just calling authed_client? client = bitvault.client(u'http://localhost:8998') ## User management # # The create action returns a User Resource which has: # # * action methods (get, update, reset) # * attributes (email, first_name, etc.) # * associated resources (applications) client.users.create(email=email, password=password) # Get an authenticated client representing the new user client = bitvault.authed_client(email=email, password=password) user = client.user ## Application management ## Fetch applications # # If the applications collection is not populated, it will be fetched from the # server. The cached version will be used if it is already loaded. A refresh can # be triggered by passing it as an option to the action. user.applications user.applications.refresh() ## Create an application. # # The optional callback_url attribute specifies a URL where BitVault # can POST event information such as confirmed transactions. app = user.applications.create( name=u'bitcoin_app', callback_url=u'https://someapp.com/callback') ## Wallets # # Wallets belong to applications, not directly to users. They require # a passphrase to be provided on creation. wallet = app.wallets.create(passphrase=u'very insecure', name=u'my funds') # An application's wallet collection is enumerable for wallet in app.wallets.values(): print(wallet) # And acts as a hash with names as keys wallet = app.wallets[u'my funds'] # The passphrase is required to unlock the wallet before you can # perform any transactions with it. wallet.unlock(u'very insecure') ## Accounts # # Wallets can have multiple accounts, each represented by a path in the # MultiWallet's deterministic trees. account = wallet.accounts.create(name=u'office supplies') ## Payments # # Sending payments # Creating addresses for receiving payments # This is a BIP 16 "Pay to Script Hash" address, where the script in question # is a BIP 11 "multisig". payment_address = account.addresses.create # TODO: Additional method "prepare" to obtain unsigned transaction for inspection payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},)) ## Transfers account_1 = wallet.accounts[u'rubber bands'] account_2 = wallet.accounts.create(name=u'travel expenses') wallet.transfer(amount=10000, source=account_1, destination=account_2)
<commit_before><commit_msg>Add high-level client usage script<commit_after>import time import bitvault current_milli_time = lambda: int(round(time.time())) email = '{0}@bitvault.io'.format(current_milli_time()) password = u'incredibly_secure' ## API discovery # # The BitVault server provides a JSON description of its API that allows # the client to generate all necessary resource classes at runtime. # FIXME: This is from high_level.py; why aren't we just calling authed_client? client = bitvault.client(u'http://localhost:8998') ## User management # # The create action returns a User Resource which has: # # * action methods (get, update, reset) # * attributes (email, first_name, etc.) # * associated resources (applications) client.users.create(email=email, password=password) # Get an authenticated client representing the new user client = bitvault.authed_client(email=email, password=password) user = client.user ## Application management ## Fetch applications # # If the applications collection is not populated, it will be fetched from the # server. The cached version will be used if it is already loaded. A refresh can # be triggered by passing it as an option to the action. user.applications user.applications.refresh() ## Create an application. # # The optional callback_url attribute specifies a URL where BitVault # can POST event information such as confirmed transactions. app = user.applications.create( name=u'bitcoin_app', callback_url=u'https://someapp.com/callback') ## Wallets # # Wallets belong to applications, not directly to users. They require # a passphrase to be provided on creation. wallet = app.wallets.create(passphrase=u'very insecure', name=u'my funds') # An application's wallet collection is enumerable for wallet in app.wallets.values(): print(wallet) # And acts as a hash with names as keys wallet = app.wallets[u'my funds'] # The passphrase is required to unlock the wallet before you can # perform any transactions with it. wallet.unlock(u'very insecure') ## Accounts # # Wallets can have multiple accounts, each represented by a path in the # MultiWallet's deterministic trees. account = wallet.accounts.create(name=u'office supplies') ## Payments # # Sending payments # Creating addresses for receiving payments # This is a BIP 16 "Pay to Script Hash" address, where the script in question # is a BIP 11 "multisig". payment_address = account.addresses.create # TODO: Additional method "prepare" to obtain unsigned transaction for inspection payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},)) ## Transfers account_1 = wallet.accounts[u'rubber bands'] account_2 = wallet.accounts.create(name=u'travel expenses') wallet.transfer(amount=10000, source=account_1, destination=account_2)
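One detail worth flagging in the script above: payment_address = account.addresses.create stores the bound method rather than calling it. If the addresses collection follows the same convention as the script's other .create(...) calls, the intended lines are presumably:

# Presumed intent (hypothetical fix, untested against the BitVault API):
# call create() so payment_address is an address, not the bound method.
payment_address = account.addresses.create()
payment = account.pay(payees=({u'address': payment_address, u'amount': 20000},))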
bda490f0e27b49d54ca7fa71e8725dcdf9da0861
registration/tests/test_provision.py
registration/tests/test_provision.py
# -*- coding: utf-8 -*- # # OpenCraft -- tools to aid developing and hosting free software projects # Copyright (C) 2015 OpenCraft <xavier@opencraft.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Test the provisioning of beta tester instances. """ # Imports ##################################################################### from unittest import mock from django.contrib.auth import get_user_model from django.test import TestCase from simple_email_confirmation.models import EmailAddress from registration.models import BetaTestApplication # Test cases ################################################################## class ApprovalTestCase(TestCase): """Test the provisioning of beta tester instances.""" def test_provision_instance(self): """Test that an instance gets correctly provisioned when the email addresses are confirmed.""" user = get_user_model().objects.create_user(username='test', email='test@example.com') application = BetaTestApplication.objects.create( user=user, subdomain='test', public_contact_email=user.email, ) EmailAddress.objects.create_unconfirmed(user.email, user) with mock.patch('registration.provision.spawn_appserver') as mock_spawn_appserver: # Confirm email address. This triggers provisioning the instance. EmailAddress.objects.confirm(user.email_address_set.get().key) self.assertTrue(mock_spawn_appserver.called) application.refresh_from_db() instance = application.instance self.assertIsNot(instance, None) self.assertEqual(instance.sub_domain, application.subdomain) self.assertEqual(instance.email, application.public_contact_email) self.assertEqual(instance.lms_users.get(), user)
Add unit test for beta test instance provisioning.
Add unit test for beta test instance provisioning.
Python
agpl-3.0
open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft,open-craft/opencraft
Add unit test for beta test instance provisioning.
# -*- coding: utf-8 -*- # # OpenCraft -- tools to aid developing and hosting free software projects # Copyright (C) 2015 OpenCraft <xavier@opencraft.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Test the provisioning of beta tester instances. """ # Imports ##################################################################### from unittest import mock from django.contrib.auth import get_user_model from django.test import TestCase from simple_email_confirmation.models import EmailAddress from registration.models import BetaTestApplication # Test cases ################################################################## class ApprovalTestCase(TestCase): """Test the provisioning of beta tester instances.""" def test_provision_instance(self): """Test that an instance gets correctly provisioned when the email addresses are confirmed.""" user = get_user_model().objects.create_user(username='test', email='test@example.com') application = BetaTestApplication.objects.create( user=user, subdomain='test', public_contact_email=user.email, ) EmailAddress.objects.create_unconfirmed(user.email, user) with mock.patch('registration.provision.spawn_appserver') as mock_spawn_appserver: # Confirm email address. This triggers provisioning the instance. EmailAddress.objects.confirm(user.email_address_set.get().key) self.assertTrue(mock_spawn_appserver.called) application.refresh_from_db() instance = application.instance self.assertIsNot(instance, None) self.assertEqual(instance.sub_domain, application.subdomain) self.assertEqual(instance.email, application.public_contact_email) self.assertEqual(instance.lms_users.get(), user)
<commit_before><commit_msg>Add unit test for beta test instance provisioning.<commit_after>
# -*- coding: utf-8 -*- # # OpenCraft -- tools to aid developing and hosting free software projects # Copyright (C) 2015 OpenCraft <xavier@opencraft.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Test the provisioning of beta tester instances. """ # Imports ##################################################################### from unittest import mock from django.contrib.auth import get_user_model from django.test import TestCase from simple_email_confirmation.models import EmailAddress from registration.models import BetaTestApplication # Test cases ################################################################## class ApprovalTestCase(TestCase): """Test the provisioning of beta tester instances.""" def test_provision_instance(self): """Test that an instance gets correctly provisioned when the email addresses are confirmed.""" user = get_user_model().objects.create_user(username='test', email='test@example.com') application = BetaTestApplication.objects.create( user=user, subdomain='test', public_contact_email=user.email, ) EmailAddress.objects.create_unconfirmed(user.email, user) with mock.patch('registration.provision.spawn_appserver') as mock_spawn_appserver: # Confirm email address. This triggers provisioning the instance. EmailAddress.objects.confirm(user.email_address_set.get().key) self.assertTrue(mock_spawn_appserver.called) application.refresh_from_db() instance = application.instance self.assertIsNot(instance, None) self.assertEqual(instance.sub_domain, application.subdomain) self.assertEqual(instance.email, application.public_contact_email) self.assertEqual(instance.lms_users.get(), user)
Add unit test for beta test instance provisioning.# -*- coding: utf-8 -*- # # OpenCraft -- tools to aid developing and hosting free software projects # Copyright (C) 2015 OpenCraft <xavier@opencraft.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Test the provisioning of beta tester instances. """ # Imports ##################################################################### from unittest import mock from django.contrib.auth import get_user_model from django.test import TestCase from simple_email_confirmation.models import EmailAddress from registration.models import BetaTestApplication # Test cases ################################################################## class ApprovalTestCase(TestCase): """Test the provisioning of beta tester instances.""" def test_provision_instance(self): """Test that an instance gets correctly provisioned when the email addresses are confirmed.""" user = get_user_model().objects.create_user(username='test', email='test@example.com') application = BetaTestApplication.objects.create( user=user, subdomain='test', public_contact_email=user.email, ) EmailAddress.objects.create_unconfirmed(user.email, user) with mock.patch('registration.provision.spawn_appserver') as mock_spawn_appserver: # Confirm email address. This triggers provisioning the instance. EmailAddress.objects.confirm(user.email_address_set.get().key) self.assertTrue(mock_spawn_appserver.called) application.refresh_from_db() instance = application.instance self.assertIsNot(instance, None) self.assertEqual(instance.sub_domain, application.subdomain) self.assertEqual(instance.email, application.public_contact_email) self.assertEqual(instance.lms_users.get(), user)
<commit_before><commit_msg>Add unit test for beta test instance provisioning.<commit_after># -*- coding: utf-8 -*- # # OpenCraft -- tools to aid developing and hosting free software projects # Copyright (C) 2015 OpenCraft <xavier@opencraft.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # """ Test the provisioning of beta tester instances. """ # Imports ##################################################################### from unittest import mock from django.contrib.auth import get_user_model from django.test import TestCase from simple_email_confirmation.models import EmailAddress from registration.models import BetaTestApplication # Test cases ################################################################## class ApprovalTestCase(TestCase): """Test the provisioning of beta tester instances.""" def test_provision_instance(self): """Test that an instance gets correctly provisioned when the email addresses are confirmed.""" user = get_user_model().objects.create_user(username='test', email='test@example.com') application = BetaTestApplication.objects.create( user=user, subdomain='test', public_contact_email=user.email, ) EmailAddress.objects.create_unconfirmed(user.email, user) with mock.patch('registration.provision.spawn_appserver') as mock_spawn_appserver: # Confirm email address. This triggers provisioning the instance. EmailAddress.objects.confirm(user.email_address_set.get().key) self.assertTrue(mock_spawn_appserver.called) application.refresh_from_db() instance = application.instance self.assertIsNot(instance, None) self.assertEqual(instance.sub_domain, application.subdomain) self.assertEqual(instance.email, application.public_contact_email) self.assertEqual(instance.lms_users.get(), user)
dc09ffa8abd791af99dfc28bced6aa33c46308f8
Python_Data/smpl3.py
Python_Data/smpl3.py
from random import randint as rndInt def main(): l = list([rndInt(1,100) for x in range(rndInt(5,900))]) # min_1(l) # min_2(l) # Simple O(n²) def min_1(list): minOut = list[0] for i in list: minLoc = i for j in list: if j < minLoc: minLoc = j if minLoc < minOut: minOut = minLoc print(minOut) # Typical O(n) approach def min_2(list): minL = list[0] for i in list: if i < minL: minL = i print(minL) if __name__ == '__main__': main()
Add very simple algorithm for teaching purposes about the big O notation
Add very simple algorithm for teaching purposes about the big O notation
Python
unlicense
robotenique/RandomAccessMemory,robotenique/RandomAccessMemory,robotenique/RandomAccessMemory
Add very simple algorithm for teaching purposes about the big O notation
from random import randint as rndInt def main(): l = list([rndInt(1,100) for x in range(rndInt(5,900))]) # min_1(l) # min_2(l) # Simple O(n²) def min_1(list): minOut = list[0] for i in list: minLoc = i for j in list: if j < minLoc: minLoc = j if minLoc < minOut: minOut = minLoc print(minOut) # Typical O(n) approach def min_2(list): minL = list[0] for i in list: if i < minL: minL = i print(minL) if __name__ == '__main__': main()
<commit_before><commit_msg>Add very simple algorithm for teaching purposes about the big O notation<commit_after>
from random import randint as rndInt def main(): l = list([rndInt(1,100) for x in range(rndInt(5,900))]) # min_1(l) # min_2(l) # Simple O(n²) def min_1(list): minOut = list[0] for i in list: minLoc = i for j in list: if j < minLoc: minLoc = j if minLoc < minOut: minOut = minLoc print(minOut) # Typical O(n) approach def min_2(list): minL = list[0] for i in list: if i < minL: minL = i print(minL) if __name__ == '__main__': main()
Add very simple algorithm for teaching purposes about the big O notationfrom random import randint as rndInt def main(): l = list([rndInt(1,100) for x in range(rndInt(5,900))]) # min_1(l) # min_2(l) # Simple O(n²) def min_1(list): minOut = list[0] for i in list: minLoc = i for j in list: if j < minLoc: minLoc = j if minLoc < minOut: minOut = minLoc print(minOut) # Typical O(n) approach def min_2(list): minL = list[0] for i in list: if i < minL: minL = i print(minL) if __name__ == '__main__': main()
<commit_before><commit_msg>Add very simple algorithm for teaching purposes about the big O notation<commit_after>from random import randint as rndInt def main(): l = list([rndInt(1,100) for x in range(rndInt(5,900))]) # min_1(l) # min_2(l) # Simple O(n²) def min_1(list): minOut = list[0] for i in list: minLoc = i for j in list: if j < minLoc: minLoc = j if minLoc < minOut: minOut = minLoc print(minOut) # Typical O(n) approach def min_2(list): minL = list[0] for i in list: if i < minL: minL = i print(minL) if __name__ == '__main__': main()
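Since main() in this record leaves both calls commented out, a quick way to actually demonstrate the O(n²)-vs-O(n) gap it teaches is to time the two strategies on growing inputs. A standalone sketch reusing the same two algorithm bodies:

import timeit
from random import randint

def min_quadratic(xs):  # same strategy as min_1 above: O(n^2)
    best = xs[0]
    for i in xs:
        loc = i
        for j in xs:
            if j < loc:
                loc = j
        if loc < best:
            best = loc
    return best

def min_linear(xs):  # same strategy as min_2 above: O(n)
    best = xs[0]
    for i in xs:
        if i < best:
            best = i
    return best

for n in (100, 1000, 5000):
    xs = [randint(1, 100) for _ in range(n)]
    t1 = timeit.timeit(lambda: min_quadratic(xs), number=1)
    t2 = timeit.timeit(lambda: min_linear(xs), number=1)
    print('n={:>5}  quadratic={:.4f}s  linear={:.4f}s'.format(n, t1, t2))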
6c7d6881751784e30324795394abfb8fc3eb75a8
demo/amqp_clock.py
demo/amqp_clock.py
#!/usr/bin/env python """ AMQP Clock Fires off simple messages at one-minute intervals to a topic exchange named 'clock', with the topic of the message being the local time as 'year.month.date.dow.hour.minute', for example: '2007.11.26.1.12.33', where the dow (day of week) is 0 for Sunday, 1 for Monday, and so on (similar to Unix crontab). A consumer could then bind a queue to the routing key '#.0' for example to get a message at the beginning of each hour. 2007-11-26 Barry Pederson <bp@barryp.org> """ from datetime import datetime from optparse import OptionParser from time import sleep import amqplib.client_0_8 as amqp Message = amqp.Message EXCHANGE_NAME = 'clock' TOPIC_PATTERN = '%Y.%m.%d.%w.%H.%M' # Python datetime.strftime() pattern def main(): parser = OptionParser() parser.add_option('--host', dest='host', help='AMQP server to connect to (default: %default)', default='localhost') parser.add_option('-u', '--userid', dest='userid', help='AMQP userid to authenticate as (default: %default)', default='guest') parser.add_option('-p', '--password', dest='password', help='AMQP password to authenticate with (default: %default)', default='guest') parser.add_option('--ssl', dest='ssl', action='store_true', help='Enable SSL with AMQP server (default: not enabled)', default=False) options, args = parser.parse_args() conn = amqp.Connection(options.host, options.userid, options.password) ch = conn.channel() ch.access_request('/data', write=True, active=True) ch.exchange_declare(EXCHANGE_NAME, type='topic') # Make sure our first message is close to the beginning # of a minute now = datetime.now() if now.second > 0: sleep(60 - now.second) while True: now = datetime.now() msg = Message(timestamp=now) topic = now.strftime(TOPIC_PATTERN) ch.basic_publish(msg, EXCHANGE_NAME, routing_key=topic) # Don't know how long the basic_publish took, so # grab the time again. now = datetime.now() sleep(60 - now.second) ch.close() conn.close() if __name__ == '__main__': main()
Add another demo program, one that spits out messages at regular intervals.
Add another demo program, one that spits out messages at regular intervals.
Python
lgpl-2.1
kmonsoor/py-amqplib,ebin123456/py-amqplib,jaknight/py-amqplib,barryp/py-amqplib
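The docstring above suggests binding a queue to the routing key '#.0' to receive one message at the top of each hour. A minimal consumer sketch under that assumption, reusing the publisher's access realm and connection defaults (host and credentials here are the script's defaults, not requirements):

import amqplib.client_0_8 as amqp

conn = amqp.Connection('localhost', 'guest', 'guest')
ch = conn.channel()
ch.access_request('/data', read=True, active=True)
qname, _, _ = ch.queue_declare()                  # anonymous, auto-named queue
ch.queue_bind(qname, 'clock', routing_key='#.0')  # minute == 0 -> top of the hour
msg = ch.basic_get(qname)                         # poll once; None if nothing arrived yet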
4f4636a04b01ee0ebe45c55c98328540be13e990
napper_memaslap.py
napper_memaslap.py
import sys, socket, time, logging
import shlex, subprocess

from kazoo.client import KazooClient
from kazoo.exceptions import NodeExistsError


def zkConnect(conn_str):
    zk = KazooClient(hosts=conn_str)
    zk.start()
    return zk

def zkCreateJobDir(zk, job_name):
    zk.ensure_path("/napper/memaslap/%s" % (job_name))

def zkRemoveJobDir(zk, job_name):
    zk.delete("/napper/memaslap/%s" % (job_name), recursive=True)

def zkRegisterWorker(zk, job_name, hostname, port):
    print "Registering myself as %s:%d" % (hostname, port)
    zk.create("/napper/memaslap/%s/%s:%d" % (job_name, hostname, port),
              "%d" % (port), ephemeral=True)
    return port

logging.basicConfig()

if len(sys.argv) < 6:
    print "usage: napper_memaslap <Zookeeper hostname:port> <job name> <worker ID> <num workers> <executable>"
    sys.exit(1)

hostport = sys.argv[1]
job_name = sys.argv[2]
worker_id = int(sys.argv[3])
num_workers = int(sys.argv[4])
memaslap_path = " ".join(sys.argv[5:])

client = zkConnect(hostport)
zkCreateJobDir(client, job_name)

hosts = []
done = False
children = client.get_children("/napper/memcached/")
for c in children:
    if not ":" in c:
        continue
    data, stat = client.get("/napper/memcached/%s" % (c))
    print "%s:%s" % (c, data)
    hosts.append("%s" % (c))

# execute program
command = "%s -s %s -T 1 -t 10s" % (memaslap_path, ",".join(hosts))
print "RUNNING: %s" % (command)
subprocess.call(shlex.split(command))

# this will implicitly clean up afterwards
client.stop()

sys.exit(0)
Add wrapper for memaslap load tester.
Add wrapper for memaslap load tester.
Python
mit
ms705/napper
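For reference, the command line the wrapper assembles is just the memaslap binary pointed at the comma-joined server list. A small illustration with made-up host addresses (the real list comes from the /napper/memcached/ znodes):

memaslap_path = "memaslap"                    # hypothetical binary name
hosts = ["10.0.0.1:11211", "10.0.0.2:11211"]  # as discovered via ZooKeeper
command = "%s -s %s -T 1 -t 10s" % (memaslap_path, ",".join(hosts))
# -> "memaslap -s 10.0.0.1:11211,10.0.0.2:11211 -T 1 -t 10s"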
9c307606152fc28a7d017c39d8621433e6485034
scripts/contrib/inject_s2s_config.py
scripts/contrib/inject_s2s_config.py
#!/usr/bin/env python

from __future__ import print_function

import sys
import argparse

import numpy as np
import json
import yaml


DESC = "Adds special node including s2s options to Nematus model.npz file."
S2S_SPECIAL_NODE = "special:model.yml"


def main():
    args = parse_args()

    print("Loading config {}".format(args.json))
    with open(args.json) as json_io:
        nematus_config = json.load(json_io)

    s2s_config = {
        "type": "nematus",
        "dim-vocabs": [nematus_config["n_words_src"], nematus_config["n_words"]],
        "dim-emb": nematus_config["dim_word"],
        "dim-rnn": nematus_config["dim"],
        "enc-type": "bidirectional",
        "enc-cell": "gru-nematus",
        "enc-cell-depth": nematus_config["enc_recurrence_transition_depth"],
        "enc-depth": nematus_config["enc_depth"],
        "dec-cell": "gru-nematus",
        "dec-cell-base-depth": nematus_config["dec_base_recurrence_transition_depth"],
        "dec-cell-high-depth": nematus_config["dec_high_recurrence_transition_depth"],
        "dec-depth": nematus_config["dec_depth"],
        "layer-normalization": nematus_config["layer_normalisation"],
        "tied-embeddings": nematus_config["tie_decoder_embeddings"],
        "skip": False,
        "special-vocab": [],
    }

    print("Loading model {}".format(args.model))
    model = np.load(args.model)

    if S2S_SPECIAL_NODE in model:
        print("Found the following s2s parameters in model:\n")
        print(model[S2S_SPECIAL_NODE])
        if not args.force:
            print("Use -f/--force to overwrite")
            exit()

    s2s_node = str.encode(yaml.dump(s2s_config).strip() + "\n")
    s2s_model = {S2S_SPECIAL_NODE: s2s_node}

    print("Updating model...")
    for tensor_name in model:
        if tensor_name != S2S_SPECIAL_NODE:
            s2s_model[tensor_name] = model[tensor_name]

    np.savez(args.model, **s2s_model)


def parse_args():
    parser = argparse.ArgumentParser(description=DESC)
    parser.add_argument(
        "-j", "--json", help="nematus config (model.npz.json)", required=True)
    parser.add_argument(
        "-m", "--model", help="nematus model (model.npz)", required=True)
    parser.add_argument(
        "-f", "--force", help="", action="store_true")
    return parser.parse_args()


if __name__ == "__main__":
    main()
Add script injecting s2s options to nematus .npz models
Add script injecting s2s options to nematus .npz models
Python
mit
marian-nmt/marian-train,emjotde/amunmt,emjotde/amunmt,marian-nmt/marian-train,emjotde/amunn,emjotde/amunmt,amunmt/marian,emjotde/amunmt,emjotde/amunn,marian-nmt/marian-train,marian-nmt/marian-train,emjotde/amunn,amunmt/marian,emjotde/amunn,amunmt/marian,marian-nmt/marian-train,emjotde/Marian,emjotde/Marian
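Once the script has run, the injected node can be read back and parsed to verify the result. A minimal check, assuming the model file is named model.npz as in the argparse help text:

import numpy as np
import yaml

model = np.load("model.npz")
node = model["special:model.yml"].tobytes()  # the node is stored as a raw byte string
print(yaml.safe_load(node))                  # -> the s2s_config dict dumped above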
9ed64ddd0f5ca8ec035b53b0a699ec721e6206d9
screen_manager_test.py
screen_manager_test.py
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.floatlayout import FloatLayout

# Example from http://robertour.com/category/kivy/page/2/

Builder.load_string("""
#:import FadeTransition kivy.uix.screenmanager.FadeTransition

<Phone>:
    AnchorLayout:
        anchor_x: 'right'
        anchor_y: 'center'
        ScreenManager:
            id: _screen_manager
            size_hint: .9, 1
            Screen:
                name: 'screen1'
                Label:
                    markup: True
                    text: '[size=24]Welcome to [color=dd88ff]THE APP[/color][/size]'
            Screen:
                name: 'screen2'
                GridLayout:
                    cols: 3
                    padding: 50
                    Button:
                        text: "1"
                    Button:
                        text: "2"
                    Button:
                        text: "3"
                    Button:
                        text: "4"
                    Button:
                        text: "5"
                    Button:
                        text: "6"
                    Button:
                        text: "7"
                    Button:
                        text: "8"
                    Button:
                        text: "9"
                    Button:
                        text: "*"
                    Button:
                        text: "0"
                    Button:
                        text: "#"
            Screen:
                name: 'screen3'
                BoxLayout:
                    Label:
                        markup: True
                        text: '[size=24]Welcome to [color=dd88ff]THE APP[/color][/size]'
                    Button:
                        text: 'Lampe aus'
                        on_press: _screen_manager.current = 'screen1'
    AnchorLayout:
        anchor_x: 'left'
        anchor_y: 'center'
        BoxLayout:
            orientation: 'vertical'
            size_hint: .1, 1
            spacing: 10 #spacing between children
            canvas:
                Color:
                    rgba: 1,0,0,.5
                Line:
                    rectangle: self.x+1, self.y+1, self.width-1, self.height-1
            Label:
                halign: 'center'
                text: 'SET'
            Button:
                text: 'CAM'
                size_hint: 1, .2
                on_press:
                    _screen_manager.transition = FadeTransition()
                    _screen_manager.current = 'screen1'
            Button:
                text: 'SH'
                on_press:
                    _screen_manager.transition.direction = 'right'
                    _screen_manager.current = 'screen2'
            Button:
                text: 'CL'
                on_press: _screen_manager.current = 'screen3'
            Button:
                text: 'VB'
                on_press: _screen_manager.current = 'screen2'
            Label:
                halign: 'center'
                valign: 'bottom'
                text: '29.09.2017\\n14:28:31'
""")

class Phone(FloatLayout):
    pass

class TestApp(App):
    def build(self):
        return Phone()

if __name__ == '__main__':
    TestApp().run()
Add test-app which uses screen-manager
Add test-app which uses screen-manager
Python
apache-2.0
ThomasHangstoerfer/pyHomeCtrl
f928624343e04d1010bb7429a47a8f13032a9fea
cli.py
cli.py
#!/bin/python

import argparse

def parse_args():
    parser = argparse.ArgumentParser()
    group_actions = parser.add_mutually_exclusive_group()
    group_actions.add_argument(
        "-r", "--remove", help="removes a note", action="store_true")
    group_actions.add_argument(
        "-m", "--modify", help="modifies a note", action="store_true")
    group_actions.add_argument(
        "-a", "--add", help="adds a note", action="store_true")
    group_actions.add_argument(
        "-n", "--note", help="the note you want to add/remove/modify", type=str)
    parser.add_argument(
        "-l", "--list", help="list notes", action="store_true")
    args = parser.parse_args()

if __name__ == '__main__':
    parse_args()
Add argument parser to takenote
Add argument parser to takenote
Python
mit
nocternology/takenote
181e8e9375c13e8759539bb84171fce015fff66e
tools/text2stercus.py
tools/text2stercus.py
#!/usr/bin/env python

import argparse

def parse(text):
    stercus = ''
    for char in text:
        stercus += str(ord(char)) + ' . '
    return stercus

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('text')
    args = parser.parse_args()
    print parse(args.text)

if __name__ == '__main__':
    main()
Add convenience tool for converting string to stercus.
Add convenience tool for converting string to stercus.
Python
mit
adamheins/stercus
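A quick worked example of the parse transform above: each input character is replaced by its ordinal value followed by ' . ', so a two-character string comes out as

>>> parse("Hi")
'72 . 105 . '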
4e876b59745a67cf1fbcbaacf1ca1675c3e1946a
onetime/models.py
onetime/models.py
from django.db import models
from django.contrib.auth.models import User

class Key(models.Model):
    user = models.ForeignKey(User)
    key = models.CharField(max_length=40)
    created = models.DateTimeField(auto_now_add=True)
    usage_left = models.IntegerField(null=True, default=1)
    expires = models.DateTimeField(null=True)
    next = models.CharField(null=True, max_length=200)
from datetime import datetime

from django.db import models
from django.contrib.auth.models import User

class Key(models.Model):
    user = models.ForeignKey(User)
    key = models.CharField(max_length=40)
    created = models.DateTimeField(auto_now_add=True)
    usage_left = models.IntegerField(null=True, default=1)
    expires = models.DateTimeField(null=True)
    next = models.CharField(null=True, max_length=200)

    def __unicode__(self):
        return '%s (%s)' % (self.key, self.user.username)

    def is_valid(self):
        if self.usage_left is not None and self.usage_left <= 0:
            return False
        if self.expires is not None and self.expires < datetime.now():
            return False
        return True

    def update_usage(self):
        if self.usage_left is not None:
            self.usage_left -= 1
            self.save()
Add validation and usage logics into the model
Add validation and usage logics into the model
Python
agpl-3.0
ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,uploadcare/django-loginurl,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website,fajran/django-loginurl,vanschelven/cmsplugin-journal,ISIFoundation/influenzanet-website,ISIFoundation/influenzanet-website
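A minimal sketch of how the new helpers might be used when a one-time key is redeemed (the key string is made up; view and URL plumbing are omitted):

from onetime.models import Key

key = Key.objects.get(key='0a1b2c')   # hypothetical key value
if key.is_valid():                    # unused and unexpired
    key.update_usage()                # consume one use and save
else:
    pass                              # reject the one-time login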
5c2c1deacaa4a237e20d30ca4040a537a6381b9b
person/migrations/0009_auto_20200501_1504.py
person/migrations/0009_auto_20200501_1504.py
# Generated by Django 2.2.12 on 2020-05-01 13:04

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('person', '0008_person_datetime_updated'),
    ]

    operations = [
        migrations.AlterField(
            model_name='person',
            name='forename',
            field=models.CharField(blank=True, db_index=True, max_length=200),
        ),
        migrations.AlterField(
            model_name='person',
            name='slug',
            field=models.SlugField(blank=True, default='', max_length=250),
        ),
    ]
Add migration for updated person fields
Add migration for updated person fields
Python
mit
openkamer/openkamer,openkamer/openkamer,openkamer/openkamer,openkamer/openkamer
82a377e3d531543d4640bd595745e91b7f8f5c3f
sparqllib/querycomponent/tests/test_group.py
sparqllib/querycomponent/tests/test_group.py
import unittest
import sparqllib

from rdflib import BNode, Literal

class TestGroup(unittest.TestCase):
    def setUp(self):
        self.group = sparqllib.querycomponent.Group()

    def test_add(self):
        self.assertEqual(self.group.components, [])
        triple = (BNode("subject"), BNode("relation"), Literal("Cats"))
        self.group += triple
        self.assertEqual(self.group.components, [triple])

    def test_serialize(self):
        self.assertEqual(self.group.serialize(), "{}")
        triple = (BNode("subject"), BNode("relation"), Literal("Cats"))
        self.group += triple
        self.assertEqual(self.group.serialize(),
                         "{?subject ?relation \"Cats\" .\n}")
Add tests for Group query component
Add tests for Group query component
Python
mit
ALSchwalm/sparqllib
f28e380b504596aa41245cb10eebebceae67d831
bin/brut-net.py
bin/brut-net.py
#!/usr/bin/env python3
"""Convert a gross annual salary into a net monthly salary."""

import optparse

def main():
    p = optparse.OptionParser(description=__doc__)
    options, arguments = p.parse_args()

    annuel_brut = int(arguments[0])
    mensuel_net = (annuel_brut - (annuel_brut * 23 / 100)) / 12
    print('%d €' % round(mensuel_net))

if __name__ == '__main__':
    main()
Add new program to convert gross to net salary
Add new program to convert gross to net salary
Python
mit
mlcdf/dotfiles,mlcdf/dotfiles,mlcdf/dotfiles
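A worked example of the formula above, assuming the flat 23% deduction it applies: for a gross annual salary of 30000,

annuel_brut = 30000
mensuel_net = (annuel_brut - (annuel_brut * 23 / 100)) / 12
# 30000 - 6900 = 23100; 23100 / 12 = 1925.0, so the script prints "1925 €"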
ccdb1b2f87c700e283b2ca51aa94408027561b0d
api.py
api.py
import json from django.conf.urls import url from django.db.models import ObjectDoesNotExist from django.http import Http404, HttpResponse from django.utils.decorators import classonlymethod from django.views import generic from django.views.decorators.csrf import csrf_exempt class Resource(generic.View): """ Request-response cycle ====================== - Incoming request with a certain HTTP verb - Standardize incoming data (PUTted JSON, POSTed multipart, whatever) - Process verbs - GET & HEAD - list - detail - POST - process - create - PUT (Complete resource) - replace or create - PATCH - patch, incomplete resources allowed - DELETE - obvious :-) - OPTIONS (unsupported) - TRACE (unsupported) """ model = None queryset = None http_method_names = ['get', 'post', 'put', 'delete', 'head', 'patch'] @classonlymethod def as_url(cls, prefix, **initkwargs): """ Usage:: urlpatterns = patterns('', Resource.as_url('v1/product/', model=Product), ) """ return url(r'^%s(?:(?P<pk>\d+)/)?$' % prefix, csrf_exempt(cls.as_view(**initkwargs))) def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs self.unserialize_request() return self.serialize_response(handler(request, *self.args, **self.kwargs)) def unserialize_request(self): """ This method standardizes various aspects of the incoming request, f.e. decoding of JSON requests etc. The "real" processing methods should not have to distinguish between varying request types anymore. """ pass def serialize_response(self, response): return HttpResponse(json.dumps(response), mimetype='application/json') def get_query_set(self): if self.queryset: return self.queryset._clone() elif self.model: return self.model._default_manager.all() def serialize_instance(self, instance): return { 'pk': instance.pk, '__unicode__': unicode(instance), } def get(self, request, *args, **kwargs): queryset = self.get_query_set() if kwargs.get('pk'): instance = queryset.get(pk=kwargs.get('pk')) return self.serialize_instance(instance) # TODO pagination, filtering return [self.serialize_instance(instance) for instance in queryset]
Add a really simple resource as beginning of a custom API implementation
Add a really simple resource as beginning of a custom API implementation
Python
bsd-3-clause
matthiask/towel,matthiask/towel,matthiask/towel,matthiask/towel
Add a really simple resource as beginning of a custom API implementation
import json from django.conf.urls import url from django.db.models import ObjectDoesNotExist from django.http import Http404, HttpResponse from django.utils.decorators import classonlymethod from django.views import generic from django.views.decorators.csrf import csrf_exempt class Resource(generic.View): """ Request-response cycle ====================== - Incoming request with a certain HTTP verb - Standardize incoming data (PUTted JSON, POSTed multipart, whatever) - Process verbs - GET & HEAD - list - detail - POST - process - create - PUT (Complete resource) - replace or create - PATCH - patch, incomplete resources allowed - DELETE - obvious :-) - OPTIONS (unsupported) - TRACE (unsupported) """ model = None queryset = None http_method_names = ['get', 'post', 'put', 'delete', 'head', 'patch'] @classonlymethod def as_url(cls, prefix, **initkwargs): """ Usage:: urlpatterns = patterns('', Resource.as_url('v1/product/', model=Product), ) """ return url(r'^%s(?:(?P<pk>\d+)/)?$' % prefix, csrf_exempt(cls.as_view(**initkwargs))) def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs self.unserialize_request() return self.serialize_response(handler(request, *self.args, **self.kwargs)) def unserialize_request(self): """ This method standardizes various aspects of the incoming request, f.e. decoding of JSON requests etc. The "real" processing methods should not have to distinguish between varying request types anymore. """ pass def serialize_response(self, response): return HttpResponse(json.dumps(response), mimetype='application/json') def get_query_set(self): if self.queryset: return self.queryset._clone() elif self.model: return self.model._default_manager.all() def serialize_instance(self, instance): return { 'pk': instance.pk, '__unicode__': unicode(instance), } def get(self, request, *args, **kwargs): queryset = self.get_query_set() if kwargs.get('pk'): instance = queryset.get(pk=kwargs.get('pk')) return self.serialize_instance(instance) # TODO pagination, filtering return [self.serialize_instance(instance) for instance in queryset]
<commit_before><commit_msg>Add a really simple resource as beginning of a custom API implementation<commit_after>
import json from django.conf.urls import url from django.db.models import ObjectDoesNotExist from django.http import Http404, HttpResponse from django.utils.decorators import classonlymethod from django.views import generic from django.views.decorators.csrf import csrf_exempt class Resource(generic.View): """ Request-response cycle ====================== - Incoming request with a certain HTTP verb - Standardize incoming data (PUTted JSON, POSTed multipart, whatever) - Process verbs - GET & HEAD - list - detail - POST - process - create - PUT (Complete resource) - replace or create - PATCH - patch, incomplete resources allowed - DELETE - obvious :-) - OPTIONS (unsupported) - TRACE (unsupported) """ model = None queryset = None http_method_names = ['get', 'post', 'put', 'delete', 'head', 'patch'] @classonlymethod def as_url(cls, prefix, **initkwargs): """ Usage:: urlpatterns = patterns('', Resource.as_url('v1/product/', model=Product), ) """ return url(r'^%s(?:(?P<pk>\d+)/)?$' % prefix, csrf_exempt(cls.as_view(**initkwargs))) def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs self.unserialize_request() return self.serialize_response(handler(request, *self.args, **self.kwargs)) def unserialize_request(self): """ This method standardizes various aspects of the incoming request, f.e. decoding of JSON requests etc. The "real" processing methods should not have to distinguish between varying request types anymore. """ pass def serialize_response(self, response): return HttpResponse(json.dumps(response), mimetype='application/json') def get_query_set(self): if self.queryset: return self.queryset._clone() elif self.model: return self.model._default_manager.all() def serialize_instance(self, instance): return { 'pk': instance.pk, '__unicode__': unicode(instance), } def get(self, request, *args, **kwargs): queryset = self.get_query_set() if kwargs.get('pk'): instance = queryset.get(pk=kwargs.get('pk')) return self.serialize_instance(instance) # TODO pagination, filtering return [self.serialize_instance(instance) for instance in queryset]
Add a really simple resource as beginning of a custom API implementationimport json from django.conf.urls import url from django.db.models import ObjectDoesNotExist from django.http import Http404, HttpResponse from django.utils.decorators import classonlymethod from django.views import generic from django.views.decorators.csrf import csrf_exempt class Resource(generic.View): """ Request-response cycle ====================== - Incoming request with a certain HTTP verb - Standardize incoming data (PUTted JSON, POSTed multipart, whatever) - Process verbs - GET & HEAD - list - detail - POST - process - create - PUT (Complete resource) - replace or create - PATCH - patch, incomplete resources allowed - DELETE - obvious :-) - OPTIONS (unsupported) - TRACE (unsupported) """ model = None queryset = None http_method_names = ['get', 'post', 'put', 'delete', 'head', 'patch'] @classonlymethod def as_url(cls, prefix, **initkwargs): """ Usage:: urlpatterns = patterns('', Resource.as_url('v1/product/', model=Product), ) """ return url(r'^%s(?:(?P<pk>\d+)/)?$' % prefix, csrf_exempt(cls.as_view(**initkwargs))) def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs self.unserialize_request() return self.serialize_response(handler(request, *self.args, **self.kwargs)) def unserialize_request(self): """ This method standardizes various aspects of the incoming request, f.e. decoding of JSON requests etc. The "real" processing methods should not have to distinguish between varying request types anymore. """ pass def serialize_response(self, response): return HttpResponse(json.dumps(response), mimetype='application/json') def get_query_set(self): if self.queryset: return self.queryset._clone() elif self.model: return self.model._default_manager.all() def serialize_instance(self, instance): return { 'pk': instance.pk, '__unicode__': unicode(instance), } def get(self, request, *args, **kwargs): queryset = self.get_query_set() if kwargs.get('pk'): instance = queryset.get(pk=kwargs.get('pk')) return self.serialize_instance(instance) # TODO pagination, filtering return [self.serialize_instance(instance) for instance in queryset]
<commit_before><commit_msg>Add a really simple resource as beginning of a custom API implementation<commit_after>import json from django.conf.urls import url from django.db.models import ObjectDoesNotExist from django.http import Http404, HttpResponse from django.utils.decorators import classonlymethod from django.views import generic from django.views.decorators.csrf import csrf_exempt class Resource(generic.View): """ Request-response cycle ====================== - Incoming request with a certain HTTP verb - Standardize incoming data (PUTted JSON, POSTed multipart, whatever) - Process verbs - GET & HEAD - list - detail - POST - process - create - PUT (Complete resource) - replace or create - PATCH - patch, incomplete resources allowed - DELETE - obvious :-) - OPTIONS (unsupported) - TRACE (unsupported) """ model = None queryset = None http_method_names = ['get', 'post', 'put', 'delete', 'head', 'patch'] @classonlymethod def as_url(cls, prefix, **initkwargs): """ Usage:: urlpatterns = patterns('', Resource.as_url('v1/product/', model=Product), ) """ return url(r'^%s(?:(?P<pk>\d+)/)?$' % prefix, csrf_exempt(cls.as_view(**initkwargs))) def dispatch(self, request, *args, **kwargs): # Try to dispatch to the right method; if a method doesn't exist, # defer to the error handler. Also defer to the error handler if the # request method isn't on the approved list. if request.method.lower() in self.http_method_names: handler = getattr(self, request.method.lower(), self.http_method_not_allowed) else: handler = self.http_method_not_allowed self.request = request self.args = args self.kwargs = kwargs self.unserialize_request() return self.serialize_response(handler(request, *self.args, **self.kwargs)) def unserialize_request(self): """ This method standardizes various aspects of the incoming request, f.e. decoding of JSON requests etc. The "real" processing methods should not have to distinguish between varying request types anymore. """ pass def serialize_response(self, response): return HttpResponse(json.dumps(response), mimetype='application/json') def get_query_set(self): if self.queryset: return self.queryset._clone() elif self.model: return self.model._default_manager.all() def serialize_instance(self, instance): return { 'pk': instance.pk, '__unicode__': unicode(instance), } def get(self, request, *args, **kwargs): queryset = self.get_query_set() if kwargs.get('pk'): instance = queryset.get(pk=kwargs.get('pk')) return self.serialize_instance(instance) # TODO pagination, filtering return [self.serialize_instance(instance) for instance in queryset]
8461b21cf4b538f2ec46a9a9b6674eff9072e84d
cli.py
cli.py
#!/usr/bin/env python

import sys
import socket
from pprint import pprint
from argparse import ArgumentParser

from minecraft_query import MinecraftQuery

def main():
    parser = ArgumentParser(description="Query status of Minecraft multiplayer server",
                            epilog="Exit status: 0 if the server can be reached, otherwise nonzero.")
    parser.add_argument("host",
                        help="target hostname")
    parser.add_argument("-q", "--quiet",
                        action='store_true', default=False,
                        help='don\'t print anything, just check if the server is running')
    parser.add_argument("-p", "--port",
                        type=int, default=25565,
                        help='UDP port of server\'s "query" service [25565]')
    parser.add_argument("-r", "--retries",
                        type=int, default=3,
                        help='retry query at most this number of times [3]')
    parser.add_argument("-t", "--timeout",
                        type=int, default=10,
                        help='retry timeout in seconds [10]')
    options = parser.parse_args()

    try:
        query = MinecraftQuery(options.host, options.port,
                               timeout=options.timeout,
                               retries=options.retries)
        server_data = query.get_rules()
    except socket.error as e:
        if not options.quiet:
            print "socket exception caught:", e.message
            print "Server is down or unreachable."
        sys.exit(1)

    if not options.quiet:
        print "Server response data:"
        pprint(server_data)
    sys.exit(0)

if __name__=="__main__":
    main()
Add friendly and scriptable CLI
Add friendly and scriptable CLI
Python
apache-2.0
CloudBotIRC/mcstatus,Trax-/mcstatus
Add friendly and scriptable CLI
#!/usr/bin/env python

import sys
import socket
from pprint import pprint
from argparse import ArgumentParser

from minecraft_query import MinecraftQuery

def main():
    parser = ArgumentParser(description="Query status of Minecraft multiplayer server",
                            epilog="Exit status: 0 if the server can be reached, otherwise nonzero.")
    parser.add_argument("host",
                        help="target hostname")
    parser.add_argument("-q", "--quiet",
                        action='store_true', default=False,
                        help='don\'t print anything, just check if the server is running')
    parser.add_argument("-p", "--port",
                        type=int, default=25565,
                        help='UDP port of server\'s "query" service [25565]')
    parser.add_argument("-r", "--retries",
                        type=int, default=3,
                        help='retry query at most this number of times [3]')
    parser.add_argument("-t", "--timeout",
                        type=int, default=10,
                        help='retry timeout in seconds [10]')
    options = parser.parse_args()

    try:
        query = MinecraftQuery(options.host, options.port,
                               timeout=options.timeout,
                               retries=options.retries)
        server_data = query.get_rules()
    except socket.error as e:
        if not options.quiet:
            print "socket exception caught:", e.message
            print "Server is down or unreachable."
        sys.exit(1)

    if not options.quiet:
        print "Server response data:"
        pprint(server_data)
    sys.exit(0)

if __name__=="__main__":
    main()
<commit_before><commit_msg>Add friendly and scriptable CLI<commit_after>
#!/usr/bin/env python

import sys
import socket
from pprint import pprint
from argparse import ArgumentParser

from minecraft_query import MinecraftQuery

def main():
    parser = ArgumentParser(description="Query status of Minecraft multiplayer server",
                            epilog="Exit status: 0 if the server can be reached, otherwise nonzero.")
    parser.add_argument("host",
                        help="target hostname")
    parser.add_argument("-q", "--quiet",
                        action='store_true', default=False,
                        help='don\'t print anything, just check if the server is running')
    parser.add_argument("-p", "--port",
                        type=int, default=25565,
                        help='UDP port of server\'s "query" service [25565]')
    parser.add_argument("-r", "--retries",
                        type=int, default=3,
                        help='retry query at most this number of times [3]')
    parser.add_argument("-t", "--timeout",
                        type=int, default=10,
                        help='retry timeout in seconds [10]')
    options = parser.parse_args()

    try:
        query = MinecraftQuery(options.host, options.port,
                               timeout=options.timeout,
                               retries=options.retries)
        server_data = query.get_rules()
    except socket.error as e:
        if not options.quiet:
            print "socket exception caught:", e.message
            print "Server is down or unreachable."
        sys.exit(1)

    if not options.quiet:
        print "Server response data:"
        pprint(server_data)
    sys.exit(0)

if __name__=="__main__":
    main()
Add friendly and scriptable CLI#!/usr/bin/env python

import sys
import socket
from pprint import pprint
from argparse import ArgumentParser

from minecraft_query import MinecraftQuery

def main():
    parser = ArgumentParser(description="Query status of Minecraft multiplayer server",
                            epilog="Exit status: 0 if the server can be reached, otherwise nonzero.")
    parser.add_argument("host",
                        help="target hostname")
    parser.add_argument("-q", "--quiet",
                        action='store_true', default=False,
                        help='don\'t print anything, just check if the server is running')
    parser.add_argument("-p", "--port",
                        type=int, default=25565,
                        help='UDP port of server\'s "query" service [25565]')
    parser.add_argument("-r", "--retries",
                        type=int, default=3,
                        help='retry query at most this number of times [3]')
    parser.add_argument("-t", "--timeout",
                        type=int, default=10,
                        help='retry timeout in seconds [10]')
    options = parser.parse_args()

    try:
        query = MinecraftQuery(options.host, options.port,
                               timeout=options.timeout,
                               retries=options.retries)
        server_data = query.get_rules()
    except socket.error as e:
        if not options.quiet:
            print "socket exception caught:", e.message
            print "Server is down or unreachable."
        sys.exit(1)

    if not options.quiet:
        print "Server response data:"
        pprint(server_data)
    sys.exit(0)

if __name__=="__main__":
    main()
<commit_before><commit_msg>Add friendly and scriptable CLI<commit_after>#!/usr/bin/env python

import sys
import socket
from pprint import pprint
from argparse import ArgumentParser

from minecraft_query import MinecraftQuery

def main():
    parser = ArgumentParser(description="Query status of Minecraft multiplayer server",
                            epilog="Exit status: 0 if the server can be reached, otherwise nonzero.")
    parser.add_argument("host",
                        help="target hostname")
    parser.add_argument("-q", "--quiet",
                        action='store_true', default=False,
                        help='don\'t print anything, just check if the server is running')
    parser.add_argument("-p", "--port",
                        type=int, default=25565,
                        help='UDP port of server\'s "query" service [25565]')
    parser.add_argument("-r", "--retries",
                        type=int, default=3,
                        help='retry query at most this number of times [3]')
    parser.add_argument("-t", "--timeout",
                        type=int, default=10,
                        help='retry timeout in seconds [10]')
    options = parser.parse_args()

    try:
        query = MinecraftQuery(options.host, options.port,
                               timeout=options.timeout,
                               retries=options.retries)
        server_data = query.get_rules()
    except socket.error as e:
        if not options.quiet:
            print "socket exception caught:", e.message
            print "Server is down or unreachable."
        sys.exit(1)

    if not options.quiet:
        print "Server response data:"
        pprint(server_data)
    sys.exit(0)

if __name__=="__main__":
    main()
cd2f9ec0c8ffbdfda4620b9edf57ab839b286ddf
bluebottle/funding_stripe/migrations/0004_auto_20200318_1504.py
bluebottle/funding_stripe/migrations/0004_auto_20200318_1504.py
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2020-03-18 14:04 from __future__ import unicode_literals from django.db import migrations from bluebottle.funding_stripe.models import get_specs def set_eventually_due(apps, schema_editor): Account = apps.get_model('funding_stripe', 'StripePayoutAccount') for account in Account.objects.all(): spec = get_specs(account.country) account.eventually_due = ( spec['verification_fields']['individual']['minimum'] + spec['verification_fields']['individual']['additional'] ) account.save() class Migration(migrations.Migration): dependencies = [ ('funding_stripe', '0003_auto_20200317_1601'), ] operations = [ migrations.RunPython(set_eventually_due) ]
Add eventually_due field in migration
Add eventually_due field in migration
Python
bsd-3-clause
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
Add eventually_due field in migration
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2020-03-18 14:04 from __future__ import unicode_literals from django.db import migrations from bluebottle.funding_stripe.models import get_specs def set_eventually_due(apps, schema_editor): Account = apps.get_model('funding_stripe', 'StripePayoutAccount') for account in Account.objects.all(): spec = get_specs(account.country) account.eventually_due = ( spec['verification_fields']['individual']['minimum'] + spec['verification_fields']['individual']['additional'] ) account.save() class Migration(migrations.Migration): dependencies = [ ('funding_stripe', '0003_auto_20200317_1601'), ] operations = [ migrations.RunPython(set_eventually_due) ]
<commit_before><commit_msg>Add eventually_due field in migration<commit_after>
# -*- coding: utf-8 -*- # Generated by Django 1.11.15 on 2020-03-18 14:04 from __future__ import unicode_literals from django.db import migrations from bluebottle.funding_stripe.models import get_specs def set_eventually_due(apps, schema_editor): Account = apps.get_model('funding_stripe', 'StripePayoutAccount') for account in Account.objects.all(): spec = get_specs(account.country) account.eventually_due = ( spec['verification_fields']['individual']['minimum'] + spec['verification_fields']['individual']['additional'] ) account.save() class Migration(migrations.Migration): dependencies = [ ('funding_stripe', '0003_auto_20200317_1601'), ] operations = [ migrations.RunPython(set_eventually_due) ]
Add eventually_due field in migration# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-03-18 14:04
from __future__ import unicode_literals

from django.db import migrations

from bluebottle.funding_stripe.models import get_specs


def set_eventually_due(apps, schema_editor):
    Account = apps.get_model('funding_stripe', 'StripePayoutAccount')
    for account in Account.objects.all():
        spec = get_specs(account.country)
        account.eventually_due = (
            spec['verification_fields']['individual']['minimum'] +
            spec['verification_fields']['individual']['additional']
        )
        account.save()


class Migration(migrations.Migration):

    dependencies = [
        ('funding_stripe', '0003_auto_20200317_1601'),
    ]

    operations = [
        migrations.RunPython(set_eventually_due)
    ]
<commit_before><commit_msg>Add eventually_due field in migration<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2020-03-18 14:04
from __future__ import unicode_literals

from django.db import migrations

from bluebottle.funding_stripe.models import get_specs


def set_eventually_due(apps, schema_editor):
    Account = apps.get_model('funding_stripe', 'StripePayoutAccount')
    for account in Account.objects.all():
        spec = get_specs(account.country)
        account.eventually_due = (
            spec['verification_fields']['individual']['minimum'] +
            spec['verification_fields']['individual']['additional']
        )
        account.save()


class Migration(migrations.Migration):

    dependencies = [
        ('funding_stripe', '0003_auto_20200317_1601'),
    ]

    operations = [
        migrations.RunPython(set_eventually_due)
    ]
46883c5c0de8cf00e75654ae2e577ddc404c8b27
api/bots/giphy/test_giphy.py
api/bots/giphy/test_giphy.py
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys import json our_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.normpath(os.path.join(our_dir))) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase from bots.giphy import giphy def get_http_response_json(gif_url): response_json = { 'meta': { 'status': 200 }, 'data': { 'images': { 'original': { 'url': gif_url } } } } return response_json def get_bot_response(gif_url): return ('[Click to enlarge](%s)' '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)' % (gif_url)) def get_http_request(keyword): return { 'api_url': giphy.GIPHY_TRANSLATE_API, 'params': { 's': keyword, 'api_key': giphy.get_giphy_api_key_from_config() } } class TestGiphyBot(BotTestCase): bot_name = "giphy" def test_bot(self): # This message calls `send_reply` function of BotHandlerApi keyword = "Hello" gif_url = "https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif" expectations = { keyword: get_bot_response(gif_url) } self.check_expected_responses( expectations=expectations, http_request=get_http_request(keyword), http_response=get_http_response_json(gif_url) )
Add test file for 'giphy' bot.
bots: Add test file for 'giphy' bot. This bot replies with a different gif for the same query each time it is provided. Mocked a definite response from the requests.get function.
Python
apache-2.0
hackerkid/zulip,jackrzhang/zulip,eeshangarg/zulip,brainwane/zulip,vaidap/zulip,timabbott/zulip,vabs22/zulip,shubhamdhama/zulip,mahim97/zulip,eeshangarg/zulip,brockwhittaker/zulip,vaidap/zulip,showell/zulip,rht/zulip,eeshangarg/zulip,brainwane/zulip,rht/zulip,showell/zulip,dhcrzf/zulip,rishig/zulip,verma-varsha/zulip,showell/zulip,rht/zulip,andersk/zulip,shubhamdhama/zulip,hackerkid/zulip,eeshangarg/zulip,brockwhittaker/zulip,jrowan/zulip,timabbott/zulip,synicalsyntax/zulip,brainwane/zulip,verma-varsha/zulip,shubhamdhama/zulip,jackrzhang/zulip,tommyip/zulip,punchagan/zulip,synicalsyntax/zulip,shubhamdhama/zulip,vaidap/zulip,andersk/zulip,brockwhittaker/zulip,kou/zulip,zulip/zulip,timabbott/zulip,amanharitsh123/zulip,Galexrt/zulip,tommyip/zulip,punchagan/zulip,verma-varsha/zulip,vabs22/zulip,brainwane/zulip,showell/zulip,shubhamdhama/zulip,brainwane/zulip,eeshangarg/zulip,jrowan/zulip,showell/zulip,eeshangarg/zulip,zulip/zulip,andersk/zulip,kou/zulip,mahim97/zulip,zulip/zulip,tommyip/zulip,zulip/zulip,vaidap/zulip,synicalsyntax/zulip,jackrzhang/zulip,synicalsyntax/zulip,vabs22/zulip,punchagan/zulip,andersk/zulip,andersk/zulip,Galexrt/zulip,dhcrzf/zulip,hackerkid/zulip,mahim97/zulip,andersk/zulip,Galexrt/zulip,rht/zulip,rishig/zulip,rht/zulip,amanharitsh123/zulip,dhcrzf/zulip,vabs22/zulip,vaidap/zulip,zulip/zulip,rishig/zulip,mahim97/zulip,mahim97/zulip,synicalsyntax/zulip,dhcrzf/zulip,jrowan/zulip,kou/zulip,rht/zulip,vabs22/zulip,rishig/zulip,showell/zulip,brockwhittaker/zulip,hackerkid/zulip,synicalsyntax/zulip,rishig/zulip,shubhamdhama/zulip,showell/zulip,brainwane/zulip,punchagan/zulip,jackrzhang/zulip,punchagan/zulip,eeshangarg/zulip,Galexrt/zulip,verma-varsha/zulip,hackerkid/zulip,jackrzhang/zulip,zulip/zulip,timabbott/zulip,dhcrzf/zulip,kou/zulip,brockwhittaker/zulip,verma-varsha/zulip,mahim97/zulip,timabbott/zulip,jrowan/zulip,vabs22/zulip,punchagan/zulip,kou/zulip,Galexrt/zulip,amanharitsh123/zulip,timabbott/zulip,rishig/zulip,rishig/zulip,zulip/zulip,tommyip/zulip,kou/zulip,hackerkid/zulip,hackerkid/zulip,amanharitsh123/zulip,tommyip/zulip,kou/zulip,andersk/zulip,tommyip/zulip,shubhamdhama/zulip,jrowan/zulip,jackrzhang/zulip,Galexrt/zulip,synicalsyntax/zulip,brockwhittaker/zulip,amanharitsh123/zulip,dhcrzf/zulip,punchagan/zulip,tommyip/zulip,jackrzhang/zulip,jrowan/zulip,verma-varsha/zulip,vaidap/zulip,timabbott/zulip,brainwane/zulip,amanharitsh123/zulip,rht/zulip,Galexrt/zulip,dhcrzf/zulip
bots: Add test file for 'giphy' bot. This bot replies with a different gif for the same query each time it is provided. Mocked a definite response from the requests.get function.
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys import json our_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.normpath(os.path.join(our_dir))) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase from bots.giphy import giphy def get_http_response_json(gif_url): response_json = { 'meta': { 'status': 200 }, 'data': { 'images': { 'original': { 'url': gif_url } } } } return response_json def get_bot_response(gif_url): return ('[Click to enlarge](%s)' '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)' % (gif_url)) def get_http_request(keyword): return { 'api_url': giphy.GIPHY_TRANSLATE_API, 'params': { 's': keyword, 'api_key': giphy.get_giphy_api_key_from_config() } } class TestGiphyBot(BotTestCase): bot_name = "giphy" def test_bot(self): # This message calls `send_reply` function of BotHandlerApi keyword = "Hello" gif_url = "https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif" expectations = { keyword: get_bot_response(gif_url) } self.check_expected_responses( expectations=expectations, http_request=get_http_request(keyword), http_response=get_http_response_json(gif_url) )
<commit_before><commit_msg>bots: Add test file for 'giphy' bot. This bot replies with a different gif for the same query each time it is provided. Mocked a definite response from the requests.get function.<commit_after>
#!/usr/bin/env python from __future__ import absolute_import from __future__ import print_function import os import sys import json our_dir = os.path.dirname(os.path.abspath(__file__)) sys.path.insert(0, os.path.normpath(os.path.join(our_dir))) # For dev setups, we can find the API in the repo itself. if os.path.exists(os.path.join(our_dir, '..')): sys.path.insert(0, '..') from bots_test_lib import BotTestCase from bots.giphy import giphy def get_http_response_json(gif_url): response_json = { 'meta': { 'status': 200 }, 'data': { 'images': { 'original': { 'url': gif_url } } } } return response_json def get_bot_response(gif_url): return ('[Click to enlarge](%s)' '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)' % (gif_url)) def get_http_request(keyword): return { 'api_url': giphy.GIPHY_TRANSLATE_API, 'params': { 's': keyword, 'api_key': giphy.get_giphy_api_key_from_config() } } class TestGiphyBot(BotTestCase): bot_name = "giphy" def test_bot(self): # This message calls `send_reply` function of BotHandlerApi keyword = "Hello" gif_url = "https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif" expectations = { keyword: get_bot_response(gif_url) } self.check_expected_responses( expectations=expectations, http_request=get_http_request(keyword), http_response=get_http_response_json(gif_url) )
bots: Add test file for 'giphy' bot. This bot replies with a different gif for the same query each time it is provided. Mocked a definite response from the requests.get function.#!/usr/bin/env python

from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import json

our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
    sys.path.insert(0, '..')

from bots_test_lib import BotTestCase
from bots.giphy import giphy

def get_http_response_json(gif_url):
    response_json = {
        'meta': {
            'status': 200
        },
        'data': {
            'images': {
                'original': {
                    'url': gif_url
                }
            }
        }
    }
    return response_json

def get_bot_response(gif_url):
    return ('[Click to enlarge](%s)'
            '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)'
            % (gif_url))

def get_http_request(keyword):
    return {
        'api_url': giphy.GIPHY_TRANSLATE_API,
        'params': {
            's': keyword,
            'api_key': giphy.get_giphy_api_key_from_config()
        }
    }

class TestGiphyBot(BotTestCase):
    bot_name = "giphy"

    def test_bot(self):
        # This message calls `send_reply` function of BotHandlerApi
        keyword = "Hello"
        gif_url = "https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif"
        expectations = {
            keyword: get_bot_response(gif_url)
        }
        self.check_expected_responses(
            expectations=expectations,
            http_request=get_http_request(keyword),
            http_response=get_http_response_json(gif_url)
        )
<commit_before><commit_msg>bots: Add test file for 'giphy' bot. This bot replies with a different gif for the same query each time it is provided. Mocked a definite response from the requests.get function.<commit_after>#!/usr/bin/env python

from __future__ import absolute_import
from __future__ import print_function

import os
import sys
import json

our_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.normpath(os.path.join(our_dir)))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
    sys.path.insert(0, '..')

from bots_test_lib import BotTestCase
from bots.giphy import giphy

def get_http_response_json(gif_url):
    response_json = {
        'meta': {
            'status': 200
        },
        'data': {
            'images': {
                'original': {
                    'url': gif_url
                }
            }
        }
    }
    return response_json

def get_bot_response(gif_url):
    return ('[Click to enlarge](%s)'
            '[](/static/images/interactive-bot/giphy/powered-by-giphy.png)'
            % (gif_url))

def get_http_request(keyword):
    return {
        'api_url': giphy.GIPHY_TRANSLATE_API,
        'params': {
            's': keyword,
            'api_key': giphy.get_giphy_api_key_from_config()
        }
    }

class TestGiphyBot(BotTestCase):
    bot_name = "giphy"

    def test_bot(self):
        # This message calls `send_reply` function of BotHandlerApi
        keyword = "Hello"
        gif_url = "https://media4.giphy.com/media/3o6ZtpxSZbQRRnwCKQ/giphy.gif"
        expectations = {
            keyword: get_bot_response(gif_url)
        }
        self.check_expected_responses(
            expectations=expectations,
            http_request=get_http_request(keyword),
            http_response=get_http_response_json(gif_url)
        )
7ac056c7db746f397c58cfa6ba4924830bf8b624
aplpy/tests/test_wcs_util.py
aplpy/tests/test_wcs_util.py
import os import numpy as np from astropy.io import fits from astropy.tests.helper import pytest from ..wcs_util import WCS, celestial_pixel_scale, non_celestial_pixel_scales from .helpers import generate_wcs HEADER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') HEADER_2D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '2d_fits', '1904-66_TAN.hdr')) HEADER_3D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '3d_fits', 'cube.hdr')) def test_pixel_scale_matrix_1(): wcs = WCS(HEADER_2D) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-0.06666667, 0.], [0., 0.06666667]]) def test_celestial_scale_1(): wcs = WCS(HEADER_2D) for crota2 in range(0, 360, 20): wcs.wcs.crota = [0.,crota2] np.testing.assert_allclose(celestial_pixel_scale(wcs), 0.06666667) def test_non_celestial_scales_1(): wcs = WCS(HEADER_2D) with pytest.raises(ValueError) as exc: non_celestial_pixel_scales(wcs) assert exc.value.args[0] == "WCS is celestial, use celestial_pixel_scale instead" def test_pixel_scale_matrix_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-6.38888900e-03, 0.], [0., 6.64236100e+01]]) def test_celestial_scale_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) with pytest.raises(ValueError) as exc: celestial_pixel_scale(wcs) assert exc.value.args[0] == "WCS is not celestial, cannot determine celestial pixel scale" def test_non_celestial_scales_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(non_celestial_pixel_scales(wcs), [6.38888900e-03, 6.64236100e+01])
Add regression tests for pixel scale
Add regression tests for pixel scale
Python
mit
allisony/aplpy,mwcraig/aplpy
Add regression tests for pixel scale
import os import numpy as np from astropy.io import fits from astropy.tests.helper import pytest from ..wcs_util import WCS, celestial_pixel_scale, non_celestial_pixel_scales from .helpers import generate_wcs HEADER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') HEADER_2D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '2d_fits', '1904-66_TAN.hdr')) HEADER_3D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '3d_fits', 'cube.hdr')) def test_pixel_scale_matrix_1(): wcs = WCS(HEADER_2D) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-0.06666667, 0.], [0., 0.06666667]]) def test_celestial_scale_1(): wcs = WCS(HEADER_2D) for crota2 in range(0, 360, 20): wcs.wcs.crota = [0.,crota2] np.testing.assert_allclose(celestial_pixel_scale(wcs), 0.06666667) def test_non_celestial_scales_1(): wcs = WCS(HEADER_2D) with pytest.raises(ValueError) as exc: non_celestial_pixel_scales(wcs) assert exc.value.args[0] == "WCS is celestial, use celestial_pixel_scale instead" def test_pixel_scale_matrix_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-6.38888900e-03, 0.], [0., 6.64236100e+01]]) def test_celestial_scale_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) with pytest.raises(ValueError) as exc: celestial_pixel_scale(wcs) assert exc.value.args[0] == "WCS is not celestial, cannot determine celestial pixel scale" def test_non_celestial_scales_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(non_celestial_pixel_scales(wcs), [6.38888900e-03, 6.64236100e+01])
<commit_before><commit_msg>Add regression tests for pixel scale<commit_after>
import os import numpy as np from astropy.io import fits from astropy.tests.helper import pytest from ..wcs_util import WCS, celestial_pixel_scale, non_celestial_pixel_scales from .helpers import generate_wcs HEADER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') HEADER_2D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '2d_fits', '1904-66_TAN.hdr')) HEADER_3D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '3d_fits', 'cube.hdr')) def test_pixel_scale_matrix_1(): wcs = WCS(HEADER_2D) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-0.06666667, 0.], [0., 0.06666667]]) def test_celestial_scale_1(): wcs = WCS(HEADER_2D) for crota2 in range(0, 360, 20): wcs.wcs.crota = [0.,crota2] np.testing.assert_allclose(celestial_pixel_scale(wcs), 0.06666667) def test_non_celestial_scales_1(): wcs = WCS(HEADER_2D) with pytest.raises(ValueError) as exc: non_celestial_pixel_scales(wcs) assert exc.value.args[0] == "WCS is celestial, use celestial_pixel_scale instead" def test_pixel_scale_matrix_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-6.38888900e-03, 0.], [0., 6.64236100e+01]]) def test_celestial_scale_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) with pytest.raises(ValueError) as exc: celestial_pixel_scale(wcs) assert exc.value.args[0] == "WCS is not celestial, cannot determine celestial pixel scale" def test_non_celestial_scales_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(non_celestial_pixel_scales(wcs), [6.38888900e-03, 6.64236100e+01])
Add regression tests for pixel scaleimport os import numpy as np from astropy.io import fits from astropy.tests.helper import pytest from ..wcs_util import WCS, celestial_pixel_scale, non_celestial_pixel_scales from .helpers import generate_wcs HEADER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') HEADER_2D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '2d_fits', '1904-66_TAN.hdr')) HEADER_3D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '3d_fits', 'cube.hdr')) def test_pixel_scale_matrix_1(): wcs = WCS(HEADER_2D) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-0.06666667, 0.], [0., 0.06666667]]) def test_celestial_scale_1(): wcs = WCS(HEADER_2D) for crota2 in range(0, 360, 20): wcs.wcs.crota = [0.,crota2] np.testing.assert_allclose(celestial_pixel_scale(wcs), 0.06666667) def test_non_celestial_scales_1(): wcs = WCS(HEADER_2D) with pytest.raises(ValueError) as exc: non_celestial_pixel_scales(wcs) assert exc.value.args[0] == "WCS is celestial, use celestial_pixel_scale instead" def test_pixel_scale_matrix_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-6.38888900e-03, 0.], [0., 6.64236100e+01]]) def test_celestial_scale_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) with pytest.raises(ValueError) as exc: celestial_pixel_scale(wcs) assert exc.value.args[0] == "WCS is not celestial, cannot determine celestial pixel scale" def test_non_celestial_scales_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(non_celestial_pixel_scales(wcs), [6.38888900e-03, 6.64236100e+01])
<commit_before><commit_msg>Add regression tests for pixel scale<commit_after>import os import numpy as np from astropy.io import fits from astropy.tests.helper import pytest from ..wcs_util import WCS, celestial_pixel_scale, non_celestial_pixel_scales from .helpers import generate_wcs HEADER_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data') HEADER_2D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '2d_fits', '1904-66_TAN.hdr')) HEADER_3D = fits.Header.fromtextfile(os.path.join(HEADER_DIR, '3d_fits', 'cube.hdr')) def test_pixel_scale_matrix_1(): wcs = WCS(HEADER_2D) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-0.06666667, 0.], [0., 0.06666667]]) def test_celestial_scale_1(): wcs = WCS(HEADER_2D) for crota2 in range(0, 360, 20): wcs.wcs.crota = [0.,crota2] np.testing.assert_allclose(celestial_pixel_scale(wcs), 0.06666667) def test_non_celestial_scales_1(): wcs = WCS(HEADER_2D) with pytest.raises(ValueError) as exc: non_celestial_pixel_scales(wcs) assert exc.value.args[0] == "WCS is celestial, use celestial_pixel_scale instead" def test_pixel_scale_matrix_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(wcs.pixel_scale_matrix, [[-6.38888900e-03, 0.], [0., 6.64236100e+01]]) def test_celestial_scale_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) with pytest.raises(ValueError) as exc: celestial_pixel_scale(wcs) assert exc.value.args[0] == "WCS is not celestial, cannot determine celestial pixel scale" def test_non_celestial_scales_2(): wcs = WCS(HEADER_3D, dimensions=[0,2], slices=[0]) np.testing.assert_allclose(non_celestial_pixel_scales(wcs), [6.38888900e-03, 6.64236100e+01])
7a3c95e5e2db3555664c0e9737032768df4940b2
phantomas/test/test_format_args.py
phantomas/test/test_format_args.py
""" Set of unit tests for helper functions """ import unittest from phantomas import Phantomas class FormatArgsTestClass(unittest.TestCase): def test_format_args(self): self.assertEqual(Phantomas.format_args(dict()), []) self.assertEqual(Phantomas.format_args(dict(foo="bar")), ['--foo=bar']) self.assertEqual(Phantomas.format_args(dict(check=True)), ['--check']) self.assertEqual(Phantomas.format_args( dict(list=['foo', 'bar', 123])), ['--list=foo,bar,123'])
Add unit tests for Phantomas.format_args
Add unit tests for Phantomas.format_args
Python
bsd-3-clause
macbre/phantomas-python,macbre/phantomas-python
Add unit tests for Phantomas.format_args
""" Set of unit tests for helper functions """ import unittest from phantomas import Phantomas class FormatArgsTestClass(unittest.TestCase): def test_format_args(self): self.assertEqual(Phantomas.format_args(dict()), []) self.assertEqual(Phantomas.format_args(dict(foo="bar")), ['--foo=bar']) self.assertEqual(Phantomas.format_args(dict(check=True)), ['--check']) self.assertEqual(Phantomas.format_args( dict(list=['foo', 'bar', 123])), ['--list=foo,bar,123'])
<commit_before><commit_msg>Add unit tests for Phantomas.format_args<commit_after>
""" Set of unit tests for helper functions """ import unittest from phantomas import Phantomas class FormatArgsTestClass(unittest.TestCase): def test_format_args(self): self.assertEqual(Phantomas.format_args(dict()), []) self.assertEqual(Phantomas.format_args(dict(foo="bar")), ['--foo=bar']) self.assertEqual(Phantomas.format_args(dict(check=True)), ['--check']) self.assertEqual(Phantomas.format_args( dict(list=['foo', 'bar', 123])), ['--list=foo,bar,123'])
Add unit tests for Phantomas.format_args""" Set of unit tests for helper functions """ import unittest from phantomas import Phantomas class FormatArgsTestClass(unittest.TestCase): def test_format_args(self): self.assertEqual(Phantomas.format_args(dict()), []) self.assertEqual(Phantomas.format_args(dict(foo="bar")), ['--foo=bar']) self.assertEqual(Phantomas.format_args(dict(check=True)), ['--check']) self.assertEqual(Phantomas.format_args( dict(list=['foo', 'bar', 123])), ['--list=foo,bar,123'])
<commit_before><commit_msg>Add unit tests for Phantomas.format_args<commit_after>""" Set of unit tests for helper functions """ import unittest from phantomas import Phantomas class FormatArgsTestClass(unittest.TestCase): def test_format_args(self): self.assertEqual(Phantomas.format_args(dict()), []) self.assertEqual(Phantomas.format_args(dict(foo="bar")), ['--foo=bar']) self.assertEqual(Phantomas.format_args(dict(check=True)), ['--check']) self.assertEqual(Phantomas.format_args( dict(list=['foo', 'bar', 123])), ['--list=foo,bar,123'])
f7aa9e986abd9fb55cb72ac4661f319a867e059d
scripts/ms_jsfs.py
scripts/ms_jsfs.py
#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
Rename so we can import it and thus get useful epydoc documentation.
Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345
Python
bsd-3-clause
beni55/dadi,cheese1213/dadi,paulirish/dadi,yangjl/dadi,ChenHsiang/dadi,cheese1213/dadi,RyanGutenkunst/dadi,paulirish/dadi,niuhuifei/dadi,beni55/dadi,ChenHsiang/dadi,yangjl/dadi,niuhuifei/dadi,RyanGutenkunst/dadi
Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345
#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
<commit_before><commit_msg>Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345<commit_after>
#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
<commit_before><commit_msg>Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345<commit_after>#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
ae07dc1cd72caf7aa04b76ef4c9377b26eec1730
telemetry/catapult_base/__init__.py
telemetry/catapult_base/__init__.py
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # All files in this directory should be moved to catapult/base/ after moving # to the new repo.
Add catapult_base folder to tools/telemetry to make the refactor easier.
Add catapult_base folder to tools/telemetry to make the refactor easier. This will make some of the refactoring more obvious and easy to review, as well as making the needed refactoring after moving to the catapult repo easier. BUG=473414 Review URL: https://codereview.chromium.org/1168263002 Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}
Python
bsd-3-clause
SummerLW/Perf-Insight-Report,catapult-project/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult-csm,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,catapult-project/catapult-csm,benschmaus/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,catapult-project/catapult,sahiljain/catapult,benschmaus/catapult,benschmaus/catapult,benschmaus/catapult,sahiljain/catapult,sahiljain/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report
Add catapult_base folder to tools/telemetry to make the refactor easier. This will make some of the refactoring more obvious and easy to review, as well as making the needed refactoring after moving to the catapult repo easier. BUG=473414 Review URL: https://codereview.chromium.org/1168263002 Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # All files in this directory should be moved to catapult/base/ after moving # to the new repo.
<commit_before><commit_msg>Add catapult_base folder to tools/telemetry to make the refactor easier. This will make some of the refactoring more obvious and easy to review, as well as making the needed refactoring after moving to the catapult repo easier. BUG=473414 Review URL: https://codereview.chromium.org/1168263002 Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}<commit_after>
# Copyright 2015 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # All files in this directory should be moved to catapult/base/ after moving # to the new repo.
Add catapult_base folder to tools/telemetry to make the refactor easier. This will make some of the refactoring more obvious and easy to review, as well as making the needed refactoring after moving to the catapult repo easier. BUG=473414 Review URL: https://codereview.chromium.org/1168263002 Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
<commit_before><commit_msg>Add catapult_base folder to tools/telemetry to make the refactor easier. This will make some of the refactoring more obvious and easy to review, as well as making the needed refactoring after moving to the catapult repo easier. BUG=473414 Review URL: https://codereview.chromium.org/1168263002 Cr-Commit-Position: 972c6d2dc6dd5efdad1377c0d224e03eb8f276f7@{#333399}<commit_after># Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# All files in this directory should be moved to catapult/base/ after moving
# to the new repo.
cfa98dfb045b8130add85366215b26a6bf04d5ec
cogs/pandentia/tempvoice.py
cogs/pandentia/tempvoice.py
from discord.ext import commands from cogs.utils import checks from cogs.utils.dataIO import dataIO import discord import asyncio class TemporaryVoice: """A cog to create TeamSpeak-like voice channels.""" def __init__(self, liara): self.liara = liara self.config = dataIO.load_json('pandentia.tempvoice') self.config_default = {'channel': None, 'limit': 0} @staticmethod def filter(channels): _channels = [] for channel in channels: if channel.name.startswith('\U0001d173' * 3): _channels.append(channel) return _channels @staticmethod async def create_channel(member: discord.Member): guild: discord.Guild = member.guild overwrites = { guild.default_role: discord.PermissionOverwrite(connect=False), member: discord.PermissionOverwrite(connect=True, manage_roles=True) } channel = await guild.create_voice_channel(('\U0001d173' * 3 + '{}\'s Channel'.format(member.name))[0:32], overwrites=overwrites) await member.move_to(channel) async def on_voice_state_update(self, member, *_): guild: discord.Guild = member.guild if guild is None: return # /shrug if self.config.get(guild.id) is None: return # lobby processing channel = self.liara.get_channel(self.config[guild.id]['channel']) if channel is None: return for member in channel.members: try: await self.create_channel(member) except discord.Forbidden: pass # empty channel cleanup await asyncio.sleep(1) # wait for the dust to settle channels = self.filter(guild.voice_channels) for channel in channels: if len(channel.members) == 0: try: await channel.delete() except discord.NotFound: pass @commands.command() @checks.mod_or_permissions(manage_channels=True) async def create_lobby(self, ctx): """Creates a temporary voice lobby.""" config = self.config.get(ctx.guild.id, self.config_default) if config['channel'] is not None: channel = self.liara.get_channel(config['channel']) if channel is not None: await ctx.send('You need to remove the original lobby before creating another one.') return try: channel = await ctx.guild.create_voice_channel('Lobby', overwrites={ ctx.guild.default_role: discord.PermissionOverwrite(speak=False)}) if self.config.get(ctx.guild.id) is None: config['channel'] = channel.id self.config[ctx.guild.id] = config else: self.config[ctx.guild.id]['channel'] = channel.id await ctx.send('Channel created! You can rename it to whatever you want now.') except discord.Forbidden: await ctx.send('It would appear that I don\'t have permissions to create channels.') def setup(liara): liara.add_cog(TemporaryVoice(liara))
Add a temporary voice channel cog
Add a temporary voice channel cog
Python
mit
Thessia/Liara
Add a temporary voice channel cog
from discord.ext import commands from cogs.utils import checks from cogs.utils.dataIO import dataIO import discord import asyncio class TemporaryVoice: """A cog to create TeamSpeak-like voice channels.""" def __init__(self, liara): self.liara = liara self.config = dataIO.load_json('pandentia.tempvoice') self.config_default = {'channel': None, 'limit': 0} @staticmethod def filter(channels): _channels = [] for channel in channels: if channel.name.startswith('\U0001d173' * 3): _channels.append(channel) return _channels @staticmethod async def create_channel(member: discord.Member): guild: discord.Guild = member.guild overwrites = { guild.default_role: discord.PermissionOverwrite(connect=False), member: discord.PermissionOverwrite(connect=True, manage_roles=True) } channel = await guild.create_voice_channel(('\U0001d173' * 3 + '{}\'s Channel'.format(member.name))[0:32], overwrites=overwrites) await member.move_to(channel) async def on_voice_state_update(self, member, *_): guild: discord.Guild = member.guild if guild is None: return # /shrug if self.config.get(guild.id) is None: return # lobby processing channel = self.liara.get_channel(self.config[guild.id]['channel']) if channel is None: return for member in channel.members: try: await self.create_channel(member) except discord.Forbidden: pass # empty channel cleanup await asyncio.sleep(1) # wait for the dust to settle channels = self.filter(guild.voice_channels) for channel in channels: if len(channel.members) == 0: try: await channel.delete() except discord.NotFound: pass @commands.command() @checks.mod_or_permissions(manage_channels=True) async def create_lobby(self, ctx): """Creates a temporary voice lobby.""" config = self.config.get(ctx.guild.id, self.config_default) if config['channel'] is not None: channel = self.liara.get_channel(config['channel']) if channel is not None: await ctx.send('You need to remove the original lobby before creating another one.') return try: channel = await ctx.guild.create_voice_channel('Lobby', overwrites={ ctx.guild.default_role: discord.PermissionOverwrite(speak=False)}) if self.config.get(ctx.guild.id) is None: config['channel'] = channel.id self.config[ctx.guild.id] = config else: self.config[ctx.guild.id]['channel'] = channel.id await ctx.send('Channel created! You can rename it to whatever you want now.') except discord.Forbidden: await ctx.send('It would appear that I don\'t have permissions to create channels.') def setup(liara): liara.add_cog(TemporaryVoice(liara))
<commit_before><commit_msg>Add a temporary voice channel cog<commit_after>
from discord.ext import commands from cogs.utils import checks from cogs.utils.dataIO import dataIO import discord import asyncio class TemporaryVoice: """A cog to create TeamSpeak-like voice channels.""" def __init__(self, liara): self.liara = liara self.config = dataIO.load_json('pandentia.tempvoice') self.config_default = {'channel': None, 'limit': 0} @staticmethod def filter(channels): _channels = [] for channel in channels: if channel.name.startswith('\U0001d173' * 3): _channels.append(channel) return _channels @staticmethod async def create_channel(member: discord.Member): guild: discord.Guild = member.guild overwrites = { guild.default_role: discord.PermissionOverwrite(connect=False), member: discord.PermissionOverwrite(connect=True, manage_roles=True) } channel = await guild.create_voice_channel(('\U0001d173' * 3 + '{}\'s Channel'.format(member.name))[0:32], overwrites=overwrites) await member.move_to(channel) async def on_voice_state_update(self, member, *_): guild: discord.Guild = member.guild if guild is None: return # /shrug if self.config.get(guild.id) is None: return # lobby processing channel = self.liara.get_channel(self.config[guild.id]['channel']) if channel is None: return for member in channel.members: try: await self.create_channel(member) except discord.Forbidden: pass # empty channel cleanup await asyncio.sleep(1) # wait for the dust to settle channels = self.filter(guild.voice_channels) for channel in channels: if len(channel.members) == 0: try: await channel.delete() except discord.NotFound: pass @commands.command() @checks.mod_or_permissions(manage_channels=True) async def create_lobby(self, ctx): """Creates a temporary voice lobby.""" config = self.config.get(ctx.guild.id, self.config_default) if config['channel'] is not None: channel = self.liara.get_channel(config['channel']) if channel is not None: await ctx.send('You need to remove the original lobby before creating another one.') return try: channel = await ctx.guild.create_voice_channel('Lobby', overwrites={ ctx.guild.default_role: discord.PermissionOverwrite(speak=False)}) if self.config.get(ctx.guild.id) is None: config['channel'] = channel.id self.config[ctx.guild.id] = config else: self.config[ctx.guild.id]['channel'] = channel.id await ctx.send('Channel created! You can rename it to whatever you want now.') except discord.Forbidden: await ctx.send('It would appear that I don\'t have permissions to create channels.') def setup(liara): liara.add_cog(TemporaryVoice(liara))
Add a temporary voice channel cogfrom discord.ext import commands from cogs.utils import checks from cogs.utils.dataIO import dataIO import discord import asyncio class TemporaryVoice: """A cog to create TeamSpeak-like voice channels.""" def __init__(self, liara): self.liara = liara self.config = dataIO.load_json('pandentia.tempvoice') self.config_default = {'channel': None, 'limit': 0} @staticmethod def filter(channels): _channels = [] for channel in channels: if channel.name.startswith('\U0001d173' * 3): _channels.append(channel) return _channels @staticmethod async def create_channel(member: discord.Member): guild: discord.Guild = member.guild overwrites = { guild.default_role: discord.PermissionOverwrite(connect=False), member: discord.PermissionOverwrite(connect=True, manage_roles=True) } channel = await guild.create_voice_channel(('\U0001d173' * 3 + '{}\'s Channel'.format(member.name))[0:32], overwrites=overwrites) await member.move_to(channel) async def on_voice_state_update(self, member, *_): guild: discord.Guild = member.guild if guild is None: return # /shrug if self.config.get(guild.id) is None: return # lobby processing channel = self.liara.get_channel(self.config[guild.id]['channel']) if channel is None: return for member in channel.members: try: await self.create_channel(member) except discord.Forbidden: pass # empty channel cleanup await asyncio.sleep(1) # wait for the dust to settle channels = self.filter(guild.voice_channels) for channel in channels: if len(channel.members) == 0: try: await channel.delete() except discord.NotFound: pass @commands.command() @checks.mod_or_permissions(manage_channels=True) async def create_lobby(self, ctx): """Creates a temporary voice lobby.""" config = self.config.get(ctx.guild.id, self.config_default) if config['channel'] is not None: channel = self.liara.get_channel(config['channel']) if channel is not None: await ctx.send('You need to remove the original lobby before creating another one.') return try: channel = await ctx.guild.create_voice_channel('Lobby', overwrites={ ctx.guild.default_role: discord.PermissionOverwrite(speak=False)}) if self.config.get(ctx.guild.id) is None: config['channel'] = channel.id self.config[ctx.guild.id] = config else: self.config[ctx.guild.id]['channel'] = channel.id await ctx.send('Channel created! You can rename it to whatever you want now.') except discord.Forbidden: await ctx.send('It would appear that I don\'t have permissions to create channels.') def setup(liara): liara.add_cog(TemporaryVoice(liara))
<commit_before><commit_msg>Add a temporary voice channel cog<commit_after>from discord.ext import commands from cogs.utils import checks from cogs.utils.dataIO import dataIO import discord import asyncio class TemporaryVoice: """A cog to create TeamSpeak-like voice channels.""" def __init__(self, liara): self.liara = liara self.config = dataIO.load_json('pandentia.tempvoice') self.config_default = {'channel': None, 'limit': 0} @staticmethod def filter(channels): _channels = [] for channel in channels: if channel.name.startswith('\U0001d173' * 3): _channels.append(channel) return _channels @staticmethod async def create_channel(member: discord.Member): guild: discord.Guild = member.guild overwrites = { guild.default_role: discord.PermissionOverwrite(connect=False), member: discord.PermissionOverwrite(connect=True, manage_roles=True) } channel = await guild.create_voice_channel(('\U0001d173' * 3 + '{}\'s Channel'.format(member.name))[0:32], overwrites=overwrites) await member.move_to(channel) async def on_voice_state_update(self, member, *_): guild: discord.Guild = member.guild if guild is None: return # /shrug if self.config.get(guild.id) is None: return # lobby processing channel = self.liara.get_channel(self.config[guild.id]['channel']) if channel is None: return for member in channel.members: try: await self.create_channel(member) except discord.Forbidden: pass # empty channel cleanup await asyncio.sleep(1) # wait for the dust to settle channels = self.filter(guild.voice_channels) for channel in channels: if len(channel.members) == 0: try: await channel.delete() except discord.NotFound: pass @commands.command() @checks.mod_or_permissions(manage_channels=True) async def create_lobby(self, ctx): """Creates a temporary voice lobby.""" config = self.config.get(ctx.guild.id, self.config_default) if config['channel'] is not None: channel = self.liara.get_channel(config['channel']) if channel is not None: await ctx.send('You need to remove the original lobby before creating another one.') return try: channel = await ctx.guild.create_voice_channel('Lobby', overwrites={ ctx.guild.default_role: discord.PermissionOverwrite(speak=False)}) if self.config.get(ctx.guild.id) is None: config['channel'] = channel.id self.config[ctx.guild.id] = config else: self.config[ctx.guild.id]['channel'] = channel.id await ctx.send('Channel created! You can rename it to whatever you want now.') except discord.Forbidden: await ctx.send('It would appear that I don\'t have permissions to create channels.') def setup(liara): liara.add_cog(TemporaryVoice(liara))
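Annotation: the cog in this record relies on one discord.py idiom — creating a voice channel with per-member permission overwrites, then moving the member in. A minimal standalone sketch of that idiom (names and the 'room' label are illustrative, not taken from the cog):

import discord

async def make_private_voice(guild: discord.Guild, member: discord.Member):
    # Deny connect to @everyone, allow the owner to connect and manage the channel
    overwrites = {
        guild.default_role: discord.PermissionOverwrite(connect=False),
        member: discord.PermissionOverwrite(connect=True, manage_channels=True),
    }
    # create_voice_channel accepts the overwrites mapping directly
    channel = await guild.create_voice_channel(
        "{}'s room".format(member.display_name), overwrites=overwrites)
    await member.move_to(channel)
    return channel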
4b8a7e3506982cf155bb7f51ef1779799f4e3b85
name/validators.py
name/validators.py
from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to get the merged_with relationship of a Name object.

    This will return a Name object until it reaches a Name that
    does not have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring.
    1. Merging with a nonexistent Name object.
    2. Creating a loop of foreign key relationships.
       For example:
           Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that
       has been merged with another, will redirect you to the Name
       it has been merged with. If a loop is created, we will also
       create the opportunity for an HTTP redirect loop.
    """
    try:
        merge_target = name.__class__.objects.get(id=name.merged_with_id)
    except name.__class__.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in
    # merge_sequence. If this happens we will raise a validation error.
    # If we don't find duplicates, then no loop has been created and
    # the generator will raise its own StopIteration and we will
    # implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
Rework the validator so it is aware of the model instance and the merged with model.
Rework the validator so it is aware of the model instance and the merged with model.
Python
bsd-3-clause
damonkelley/django-name,damonkelley/django-name,unt-libraries/django-name,unt-libraries/django-name,unt-libraries/django-name,damonkelley/django-name
Rework the validator so it is aware of the model instance and the merged with model.
from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to get the merged_with relationship of a Name object.

    This will return a Name object until it reaches a Name that
    does not have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring.
    1. Merging with a nonexistent Name object.
    2. Creating a loop of foreign key relationships.
       For example:
           Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that
       has been merged with another, will redirect you to the Name
       it has been merged with. If a loop is created, we will also
       create the opportunity for an HTTP redirect loop.
    """
    try:
        merge_target = name.__class__.objects.get(id=name.merged_with_id)
    except name.__class__.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in
    # merge_sequence. If this happens we will raise a validation error.
    # If we don't find duplicates, then no loop has been created and
    # the generator will raise its own StopIteration and we will
    # implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
<commit_before><commit_msg>Rework the validator so it is aware of the model instance and the merged with model.<commit_after>
from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to get the merged_with relationship of a Name object.

    This will return a Name object until it reaches a Name that
    does not have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring.
    1. Merging with a nonexistent Name object.
    2. Creating a loop of foreign key relationships.
       For example:
           Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that
       has been merged with another, will redirect you to the Name
       it has been merged with. If a loop is created, we will also
       create the opportunity for an HTTP redirect loop.
    """
    try:
        merge_target = name.__class__.objects.get(id=name.merged_with_id)
    except name.__class__.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in
    # merge_sequence. If this happens we will raise a validation error.
    # If we don't find duplicates, then no loop has been created and
    # the generator will raise its own StopIteration and we will
    # implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
Rework the validator so it is aware of the model instance and the merged with model.from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to get the merged_with relationship of a Name object.

    This will return a Name object until it reaches a Name that
    does not have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring.
    1. Merging with a nonexistent Name object.
    2. Creating a loop of foreign key relationships.
       For example:
           Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that
       has been merged with another, will redirect you to the Name
       it has been merged with. If a loop is created, we will also
       create the opportunity for an HTTP redirect loop.
    """
    try:
        merge_target = name.__class__.objects.get(id=name.merged_with_id)
    except name.__class__.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in
    # merge_sequence. If this happens we will raise a validation error.
    # If we don't find duplicates, then no loop has been created and
    # the generator will raise its own StopIteration and we will
    # implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
<commit_before><commit_msg>Rework the validator so it is aware of the model instance and the merged with model.<commit_after>from django.core.exceptions import ValidationError


def follow_merged_with(name):
    """A generator to get the merged_with relationship of a Name object.

    This will return a Name object until it reaches a Name that
    does not have a merged_with relationship.
    """
    while name:
        merged_into = name.merged_with
        if merged_into:
            yield merged_into
        name = merged_into


def validate_merged_with(name):
    """Validator for the merged_with ForeignKey field.

    This will prevent two scenarios from occurring.
    1. Merging with a nonexistent Name object.
    2. Creating a loop of foreign key relationships.
       For example:
           Name 1 -> Name 2 -> Name 3 -> Name 1

       We need to prevent this because navigating to a name that
       has been merged with another, will redirect you to the Name
       it has been merged with. If a loop is created, we will also
       create the opportunity for an HTTP redirect loop.
    """
    try:
        merge_target = name.__class__.objects.get(id=name.merged_with_id)
    except name.__class__.DoesNotExist:
        raise ValidationError(
            dict(merged_with=u'The merge target must exist.'))

    if name.merged_with_id == name.id:
        raise ValidationError(
            dict(merged_with=u'Unable to merge a Name with itself.'))

    # Iterate through the generator and keep track of the returned names.
    # We will find a loop if a returned name is already in
    # merge_sequence. If this happens we will raise a validation error.
    # If we don't find duplicates, then no loop has been created and
    # the generator will raise its own StopIteration and we will
    # implicitly return.
    merge_sequence = [name]
    for name in follow_merged_with(merge_target):
        if name in merge_sequence:
            msg = (u'The specified merge action completes a merge loop. '
                   'Unable to complete merge.')
            raise ValidationError(dict(merged_with=msg))
        merge_sequence.append(name)
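Annotation: the loop check in this record only needs objects with a merged_with attribute, so it can be exercised without Django. A small sketch (the Stub class is hypothetical, purely for illustration):

class Stub(object):
    def __init__(self, name):
        self.name = name
        self.merged_with = None

a, b, c = Stub('a'), Stub('b'), Stub('c')
a.merged_with = b
b.merged_with = c
c.merged_with = a  # completes a loop: a -> b -> c -> a

seen = [a]
for node in follow_merged_with(a.merged_with):
    if node in seen:
        print('merge loop detected at', node.name)
        break
    seen.append(node)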
0f6f829120d7ccca1803e74a7620aef3e8bfa73f
__init__.py
__init__.py
# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XManager."""
Make XManager a regular package rather than a namespace package.
Make XManager a regular package rather than a namespace package. PiperOrigin-RevId: 426942901 Change-Id: I9fae90c19996755abbca4f87dad38254aa2e174e GitOrigin-RevId: 7ca0daad14db366fe4610488539d37fc08e78dc9
Python
apache-2.0
deepmind/xmanager,deepmind/xmanager
Make XManager a regular package rather than a namespace package. PiperOrigin-RevId: 426942901 Change-Id: I9fae90c19996755abbca4f87dad38254aa2e174e GitOrigin-RevId: 7ca0daad14db366fe4610488539d37fc08e78dc9
# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XManager."""
<commit_before><commit_msg>Make XManager a regular package rather than a namespace package. PiperOrigin-RevId: 426942901 Change-Id: I9fae90c19996755abbca4f87dad38254aa2e174e GitOrigin-RevId: 7ca0daad14db366fe4610488539d37fc08e78dc9<commit_after>
# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XManager."""
Make XManager a regular package rather than a namespace package. PiperOrigin-RevId: 426942901 Change-Id: I9fae90c19996755abbca4f87dad38254aa2e174e GitOrigin-RevId: 7ca0daad14db366fe4610488539d37fc08e78dc9# Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XManager."""
<commit_before><commit_msg>Make XManager a regular package rather than a namespace package. PiperOrigin-RevId: 426942901 Change-Id: I9fae90c19996755abbca4f87dad38254aa2e174e GitOrigin-RevId: 7ca0daad14db366fe4610488539d37fc08e78dc9<commit_after># Copyright 2022 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """XManager."""
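Annotation: the commit message distinguishes a regular package (a directory with an __init__.py, as added here) from an implicit namespace package (PEP 420, no __init__.py). The difference is observable at runtime; a quick check, assuming the package is importable in your environment:

import importlib

pkg = importlib.import_module('xmanager')
# A regular package exposes the path of its __init__.py; a namespace
# package has no __init__ module, so __file__ is None (Python >= 3.7).
print(pkg.__file__)
print(type(pkg.__path__))  # list for a regular package, _NamespacePath otherwise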
e9eb2f37b3972cc840525682d9b607b160859d8e
tests/sc_utils_test.py
tests/sc_utils_test.py
import unittest import context class TestEssStates(unittest.TestCase): def test_safe_add(self): from sc_utils import safeadd self.assertTrue(safeadd() is None) self.assertTrue(safeadd(None, None) is None) self.assertTrue(safeadd(1, None) == 1) self.assertTrue(safeadd(1, 2, 3) == 6) self.assertTrue(safeadd(1, 2, 3, None) == 6) self.assertTrue(safeadd(0) == 0) self.assertTrue(safeadd(0, None) == 0)
Add unit test module for sc_utils (to be expanded over time).
Add unit test module for sc_utils (to be expanded over time).
Python
mit
victronenergy/dbus-systemcalc-py
Add unit test module for sc_utils (to be expanded over time).
import unittest import context class TestEssStates(unittest.TestCase): def test_safe_add(self): from sc_utils import safeadd self.assertTrue(safeadd() is None) self.assertTrue(safeadd(None, None) is None) self.assertTrue(safeadd(1, None) == 1) self.assertTrue(safeadd(1, 2, 3) == 6) self.assertTrue(safeadd(1, 2, 3, None) == 6) self.assertTrue(safeadd(0) == 0) self.assertTrue(safeadd(0, None) == 0)
<commit_before><commit_msg>Add unit test module for sc_utils (to be expanded over time).<commit_after>
import unittest import context class TestEssStates(unittest.TestCase): def test_safe_add(self): from sc_utils import safeadd self.assertTrue(safeadd() is None) self.assertTrue(safeadd(None, None) is None) self.assertTrue(safeadd(1, None) == 1) self.assertTrue(safeadd(1, 2, 3) == 6) self.assertTrue(safeadd(1, 2, 3, None) == 6) self.assertTrue(safeadd(0) == 0) self.assertTrue(safeadd(0, None) == 0)
Add unit test module for sc_utils (to be expanded over time).import unittest import context class TestEssStates(unittest.TestCase): def test_safe_add(self): from sc_utils import safeadd self.assertTrue(safeadd() is None) self.assertTrue(safeadd(None, None) is None) self.assertTrue(safeadd(1, None) == 1) self.assertTrue(safeadd(1, 2, 3) == 6) self.assertTrue(safeadd(1, 2, 3, None) == 6) self.assertTrue(safeadd(0) == 0) self.assertTrue(safeadd(0, None) == 0)
<commit_before><commit_msg>Add unit test module for sc_utils (to be expanded over time).<commit_after>import unittest import context class TestEssStates(unittest.TestCase): def test_safe_add(self): from sc_utils import safeadd self.assertTrue(safeadd() is None) self.assertTrue(safeadd(None, None) is None) self.assertTrue(safeadd(1, None) == 1) self.assertTrue(safeadd(1, 2, 3) == 6) self.assertTrue(safeadd(1, 2, 3, None) == 6) self.assertTrue(safeadd(0) == 0) self.assertTrue(safeadd(0, None) == 0)
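Annotation: the tests in this record pin down safeadd's contract — None inputs are ignored, an empty or all-None call returns None, and zero is preserved. A minimal implementation satisfying them might look like this (the actual sc_utils source is not part of the record, so this is an illustration, not the project's code):

def safeadd(*values):
    # Keep only real values; None acts as "no reading"
    present = [v for v in values if v is not None]
    if not present:
        return None  # nothing at all to add
    return sum(present)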
3e718763a00aa7055d6e4c6b2f0fcbff853d6d0d
dbaas/workflow/steps/util/region_migration/update_zabbix_host.py
dbaas/workflow/steps/util/region_migration/update_zabbix_host.py
# -*- coding: utf-8 -*- import logging from util import full_stack from dbaas_credentials.credential import Credential from dbaas_credentials.models import CredentialType from dbaas_zabbix import factory_for from workflow.steps.util.base import BaseStep from workflow.exceptions.error_codes import DBAAS_0012 LOG = logging.getLogger(__name__) class UpdateZabbixHost(BaseStep): def __unicode__(self): return "Updating zabbix monitoring..." def do(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: future_host = host.future_host zabbix_provider.update_host_interface(host_name=host.hostname, ip=future_host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False def undo(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: zabbix_provider.update_host_interface(host_name=host.hostname, ip=host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False
Add step to update zabbix host
Add step to update zabbix host
Python
bsd-3-clause
globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service
Add step to update zabbix host
# -*- coding: utf-8 -*- import logging from util import full_stack from dbaas_credentials.credential import Credential from dbaas_credentials.models import CredentialType from dbaas_zabbix import factory_for from workflow.steps.util.base import BaseStep from workflow.exceptions.error_codes import DBAAS_0012 LOG = logging.getLogger(__name__) class UpdateZabbixHost(BaseStep): def __unicode__(self): return "Updating zabbix monitoring..." def do(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: future_host = host.future_host zabbix_provider.update_host_interface(host_name=host.hostname, ip=future_host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False def undo(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: zabbix_provider.update_host_interface(host_name=host.hostname, ip=host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False
<commit_before><commit_msg>Add step to update zabbix host<commit_after>
# -*- coding: utf-8 -*- import logging from util import full_stack from dbaas_credentials.credential import Credential from dbaas_credentials.models import CredentialType from dbaas_zabbix import factory_for from workflow.steps.util.base import BaseStep from workflow.exceptions.error_codes import DBAAS_0012 LOG = logging.getLogger(__name__) class UpdateZabbixHost(BaseStep): def __unicode__(self): return "Updating zabbix monitoring..." def do(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: future_host = host.future_host zabbix_provider.update_host_interface(host_name=host.hostname, ip=future_host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False def undo(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: zabbix_provider.update_host_interface(host_name=host.hostname, ip=host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False
Add step to update zabbix host# -*- coding: utf-8 -*- import logging from util import full_stack from dbaas_credentials.credential import Credential from dbaas_credentials.models import CredentialType from dbaas_zabbix import factory_for from workflow.steps.util.base import BaseStep from workflow.exceptions.error_codes import DBAAS_0012 LOG = logging.getLogger(__name__) class UpdateZabbixHost(BaseStep): def __unicode__(self): return "Updating zabbix monitoring..." def do(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: future_host = host.future_host zabbix_provider.update_host_interface(host_name=host.hostname, ip=future_host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False def undo(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: zabbix_provider.update_host_interface(host_name=host.hostname, ip=host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False
<commit_before><commit_msg>Add step to update zabbix host<commit_after># -*- coding: utf-8 -*- import logging from util import full_stack from dbaas_credentials.credential import Credential from dbaas_credentials.models import CredentialType from dbaas_zabbix import factory_for from workflow.steps.util.base import BaseStep from workflow.exceptions.error_codes import DBAAS_0012 LOG = logging.getLogger(__name__) class UpdateZabbixHost(BaseStep): def __unicode__(self): return "Updating zabbix monitoring..." def do(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: future_host = host.future_host zabbix_provider.update_host_interface(host_name=host.hostname, ip=future_host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False def undo(self, workflow_dict): try: if 'databaseinfra' not in workflow_dict: return False databaseinfra = workflow_dict['databaseinfra'] environment = workflow_dict['source_environment'] integration = CredentialType.objects.get(type=CredentialType.ZABBIX) credentials = Credential.get_credentials(environment=environment, integration=integration) zabbix_provider = factory_for(databaseinfra=databaseinfra, credentials=credentials) LOG.info("Updating zabbix monitoring for {}...".format(databaseinfra)) hosts = workflow_dict['source_hosts'] for host in hosts: zabbix_provider.update_host_interface(host_name=host.hostname, ip=host.address) return True except Exception: traceback = full_stack() workflow_dict['exceptions']['error_codes'].append(DBAAS_0012) workflow_dict['exceptions']['traceback'].append(traceback) return False
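Annotation: do() and undo() in this step are symmetric — do() repoints each Zabbix host interface at the migration target (future_host), undo() restores the original address. Stripped of the workflow plumbing, the core operation reduces to (zabbix_provider stubbed; the method name is taken from the record):

def repoint_monitoring(zabbix_provider, hosts, to_future=True):
    for host in hosts:
        target = host.future_host if to_future else host
        zabbix_provider.update_host_interface(host_name=host.hostname,
                                              ip=target.address)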
0705d32787a1e9eba49ddc514161c505be572622
web/premises/migrations/0018_premise_supporters.py
web/premises/migrations/0018_premise_supporters.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('premises', '0017_auto_20141030_0353'), ] operations = [ migrations.AddField( model_name='premise', name='supporters', field=models.ManyToManyField(related_name=b'supporting', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
Add migration file for supporting functionality
Add migration file for supporting functionality
Python
mit
beratdogan/arguman.org,Arthur2e5/arguman.org,arguman/arguman.org,bahattincinic/arguman.org,arguman/arguman.org,omeripek/arguman.org,donkawechico/arguman.org,taiansu/arguman.org,Arthur2e5/arguman.org,taiansu/arguman.org,donkawechico/arguman.org,taiansu/arguman.org,arguman/arguman.org,donkawechico/arguman.org,omeripek/arguman.org,Arthur2e5/arguman.org,omeripek/arguman.org,Arthur2e5/arguman.org,donkawechico/arguman.org,bahattincinic/arguman.org,bahattincinic/arguman.org,taiansu/arguman.org,arguman/arguman.org,bahattincinic/arguman.org,beratdogan/arguman.org
Add migration file for supporting functionality
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('premises', '0017_auto_20141030_0353'), ] operations = [ migrations.AddField( model_name='premise', name='supporters', field=models.ManyToManyField(related_name=b'supporting', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
<commit_before><commit_msg>Add migration file for supporting functionality<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('premises', '0017_auto_20141030_0353'), ] operations = [ migrations.AddField( model_name='premise', name='supporters', field=models.ManyToManyField(related_name=b'supporting', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
Add migration file for supporting functionality# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('premises', '0017_auto_20141030_0353'), ] operations = [ migrations.AddField( model_name='premise', name='supporters', field=models.ManyToManyField(related_name=b'supporting', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
<commit_before><commit_msg>Add migration file for supporting functionality<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('premises', '0017_auto_20141030_0353'), ] operations = [ migrations.AddField( model_name='premise', name='supporters', field=models.ManyToManyField(related_name=b'supporting', to=settings.AUTH_USER_MODEL), preserve_default=True, ), ]
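Annotation: this migration corresponds to a ManyToManyField on the Premise model. The model-side declaration it mirrors would be roughly (a sketch inferred from the migration, not quoted from the repository):

from django.conf import settings
from django.db import models

class Premise(models.Model):
    supporters = models.ManyToManyField(settings.AUTH_USER_MODEL,
                                        related_name='supporting')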
a2fda40514a499f9980300276ec84e7065cd39b5
mistral/db/sqlalchemy/migration/alembic_migrations/versions/038_delete_delayed_calls_with_empty_key.py
mistral/db/sqlalchemy/migration/alembic_migrations/versions/038_delete_delayed_calls_with_empty_key.py
# Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete delayed calls with key=NULL. Revision ID: 038 Revises: 037 Create Date: 2020-7-13 13:20:00 """ # revision identifiers, used by Alembic. from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column revision = '038' down_revision = '037' def upgrade(): # See https://bugs.launchpad.net/mistral/+bug/1861988. # Due to this bug there may be redundant delayed calls in DB. # We need to delete all rows where the "key" column is None. session = sa.orm.Session(bind=op.get_bind()) delayed_calls = table('delayed_calls_v2', column('key')) with session.begin(subtransactions=True): session.execute( delayed_calls.delete().where(delayed_calls.c.key==None) # noqa ) session.commit()
Add a migration that deletes redundant delayed calls
Add a migration that deletes redundant delayed calls * Due to the bug https://bugs.launchpad.net/mistral/+bug/1861988 some Mistral installations may have redundant delayed calls with the empty "key" column. This patch adds a DB migration that cleans them up. Change-Id: Ic46d3d711a03fbd6364809a0fddcbb48f3e28d2a Closes-Bug: #1887335
Python
apache-2.0
openstack/mistral,openstack/mistral
Add a migration that deletes redundant delayed calls * Due to the bug https://bugs.launchpad.net/mistral/+bug/1861988 some Mistral installations may have redundant delayed calls with the empty "key" column. This patch adds a DB migration that cleans them up. Change-Id: Ic46d3d711a03fbd6364809a0fddcbb48f3e28d2a Closes-Bug: #1887335
# Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete delayed calls with key=NULL. Revision ID: 038 Revises: 037 Create Date: 2020-7-13 13:20:00 """ # revision identifiers, used by Alembic. from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column revision = '038' down_revision = '037' def upgrade(): # See https://bugs.launchpad.net/mistral/+bug/1861988. # Due to this bug there may be redundant delayed calls in DB. # We need to delete all rows where the "key" column is None. session = sa.orm.Session(bind=op.get_bind()) delayed_calls = table('delayed_calls_v2', column('key')) with session.begin(subtransactions=True): session.execute( delayed_calls.delete().where(delayed_calls.c.key==None) # noqa ) session.commit()
<commit_before><commit_msg>Add a migration that deletes redundant delayed calls * Due to the bug https://bugs.launchpad.net/mistral/+bug/1861988 some Mistral installations may have redundant delayed calls with the empty "key" column. This patch adds a DB migration that cleans them up. Change-Id: Ic46d3d711a03fbd6364809a0fddcbb48f3e28d2a Closes-Bug: #1887335<commit_after>
# Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete delayed calls with key=NULL. Revision ID: 038 Revises: 037 Create Date: 2020-7-13 13:20:00 """ # revision identifiers, used by Alembic. from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column revision = '038' down_revision = '037' def upgrade(): # See https://bugs.launchpad.net/mistral/+bug/1861988. # Due to this bug there may be redundant delayed calls in DB. # We need to delete all rows where the "key" column is None. session = sa.orm.Session(bind=op.get_bind()) delayed_calls = table('delayed_calls_v2', column('key')) with session.begin(subtransactions=True): session.execute( delayed_calls.delete().where(delayed_calls.c.key==None) # noqa ) session.commit()
Add a migration that deletes redundant delayed calls * Due to the bug https://bugs.launchpad.net/mistral/+bug/1861988 some Mistral installations may have redundant delayed calls with the empty "key" column. This patch adds a DB migration that cleans them up. Change-Id: Ic46d3d711a03fbd6364809a0fddcbb48f3e28d2a Closes-Bug: #1887335# Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete delayed calls with key=NULL. Revision ID: 038 Revises: 037 Create Date: 2020-7-13 13:20:00 """ # revision identifiers, used by Alembic. from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column revision = '038' down_revision = '037' def upgrade(): # See https://bugs.launchpad.net/mistral/+bug/1861988. # Due to this bug there may be redundant delayed calls in DB. # We need to delete all rows where the "key" column is None. session = sa.orm.Session(bind=op.get_bind()) delayed_calls = table('delayed_calls_v2', column('key')) with session.begin(subtransactions=True): session.execute( delayed_calls.delete().where(delayed_calls.c.key==None) # noqa ) session.commit()
<commit_before><commit_msg>Add a migration that deletes redundant delayed calls * Due to the bug https://bugs.launchpad.net/mistral/+bug/1861988 some Mistral installations may have redundant delayed calls with the empty "key" column. This patch adds a DB migration that cleans them up. Change-Id: Ic46d3d711a03fbd6364809a0fddcbb48f3e28d2a Closes-Bug: #1887335<commit_after># Copyright 2020 Nokia Software. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Delete delayed calls with key=NULL. Revision ID: 038 Revises: 037 Create Date: 2020-7-13 13:20:00 """ # revision identifiers, used by Alembic. from alembic import op import sqlalchemy as sa from sqlalchemy.sql import table, column revision = '038' down_revision = '037' def upgrade(): # See https://bugs.launchpad.net/mistral/+bug/1861988. # Due to this bug there may be redundant delayed calls in DB. # We need to delete all rows where the "key" column is None. session = sa.orm.Session(bind=op.get_bind()) delayed_calls = table('delayed_calls_v2', column('key')) with session.begin(subtransactions=True): session.execute( delayed_calls.delete().where(delayed_calls.c.key==None) # noqa ) session.commit()
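Annotation: the lightweight table()/column() constructs used in the migration build a statement without reflecting the real schema, so the DELETE it issues can be previewed outside Alembic (exact rendering varies by dialect):

from sqlalchemy.sql import table, column

delayed_calls = table('delayed_calls_v2', column('key'))
stmt = delayed_calls.delete().where(delayed_calls.c.key == None)  # noqa: E711
print(stmt)  # renders roughly: DELETE FROM delayed_calls_v2 WHERE key IS NULL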
943d258bdbd86940d259d3b288968645814ff91f
Lib/fontTools/pens/ttGlyphPen_test.py
Lib/fontTools/pens/ttGlyphPen_test.py
from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * import os import unittest from fontTools import ttLib from fontTools.pens.ttGlyphPen import TTGlyphPen class TTGlyphPenTest(unittest.TestCase): def setUp(self): #self.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) self.font = ttLib.TTFont() ttx_path = os.path.join( os.path.abspath(os.path.dirname(os.path.realpath(__file__))), '..', 'ttLib', 'testdata', 'TestTTF-Regular.ttx') self.font.importXML(ttx_path, quiet=True) self.pen = TTGlyphPen(self.font.getGlyphSet()) def test_drawGlyphsUnchanged(self): glyphSet = self.font.getGlyphSet() glyfTable = self.font['glyf'] for name in self.font.getGlyphOrder(): oldGlyph = glyphSet[name] oldGlyph.draw(self.pen) oldGlyph = oldGlyph._glyph newGlyph = self.pen.glyph() newGlyph.recalcBounds(glyfTable) if hasattr(oldGlyph, 'program'): newGlyph.program = oldGlyph.program self.assertEqual( oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) if __name__ == '__main__': unittest.main()
Add a test using existing test data
[TTGlyphPen] Add a test using existing test data
Python
mit
fonttools/fonttools,googlefonts/fonttools
[TTGlyphPen] Add a test using existing test data
from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * import os import unittest from fontTools import ttLib from fontTools.pens.ttGlyphPen import TTGlyphPen class TTGlyphPenTest(unittest.TestCase): def setUp(self): #self.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) self.font = ttLib.TTFont() ttx_path = os.path.join( os.path.abspath(os.path.dirname(os.path.realpath(__file__))), '..', 'ttLib', 'testdata', 'TestTTF-Regular.ttx') self.font.importXML(ttx_path, quiet=True) self.pen = TTGlyphPen(self.font.getGlyphSet()) def test_drawGlyphsUnchanged(self): glyphSet = self.font.getGlyphSet() glyfTable = self.font['glyf'] for name in self.font.getGlyphOrder(): oldGlyph = glyphSet[name] oldGlyph.draw(self.pen) oldGlyph = oldGlyph._glyph newGlyph = self.pen.glyph() newGlyph.recalcBounds(glyfTable) if hasattr(oldGlyph, 'program'): newGlyph.program = oldGlyph.program self.assertEqual( oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>[TTGlyphPen] Add a test using existing test data<commit_after>
from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * import os import unittest from fontTools import ttLib from fontTools.pens.ttGlyphPen import TTGlyphPen class TTGlyphPenTest(unittest.TestCase): def setUp(self): #self.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) self.font = ttLib.TTFont() ttx_path = os.path.join( os.path.abspath(os.path.dirname(os.path.realpath(__file__))), '..', 'ttLib', 'testdata', 'TestTTF-Regular.ttx') self.font.importXML(ttx_path, quiet=True) self.pen = TTGlyphPen(self.font.getGlyphSet()) def test_drawGlyphsUnchanged(self): glyphSet = self.font.getGlyphSet() glyfTable = self.font['glyf'] for name in self.font.getGlyphOrder(): oldGlyph = glyphSet[name] oldGlyph.draw(self.pen) oldGlyph = oldGlyph._glyph newGlyph = self.pen.glyph() newGlyph.recalcBounds(glyfTable) if hasattr(oldGlyph, 'program'): newGlyph.program = oldGlyph.program self.assertEqual( oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) if __name__ == '__main__': unittest.main()
[TTGlyphPen] Add a test using existing test datafrom __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * import os import unittest from fontTools import ttLib from fontTools.pens.ttGlyphPen import TTGlyphPen class TTGlyphPenTest(unittest.TestCase): def setUp(self): #self.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) self.font = ttLib.TTFont() ttx_path = os.path.join( os.path.abspath(os.path.dirname(os.path.realpath(__file__))), '..', 'ttLib', 'testdata', 'TestTTF-Regular.ttx') self.font.importXML(ttx_path, quiet=True) self.pen = TTGlyphPen(self.font.getGlyphSet()) def test_drawGlyphsUnchanged(self): glyphSet = self.font.getGlyphSet() glyfTable = self.font['glyf'] for name in self.font.getGlyphOrder(): oldGlyph = glyphSet[name] oldGlyph.draw(self.pen) oldGlyph = oldGlyph._glyph newGlyph = self.pen.glyph() newGlyph.recalcBounds(glyfTable) if hasattr(oldGlyph, 'program'): newGlyph.program = oldGlyph.program self.assertEqual( oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>[TTGlyphPen] Add a test using existing test data<commit_after>from __future__ import print_function, division, absolute_import from fontTools.misc.py23 import * import os import unittest from fontTools import ttLib from fontTools.pens.ttGlyphPen import TTGlyphPen class TTGlyphPenTest(unittest.TestCase): def setUp(self): #self.font = ttLib.TTFont(recalcBBoxes=False, recalcTimestamp=False) self.font = ttLib.TTFont() ttx_path = os.path.join( os.path.abspath(os.path.dirname(os.path.realpath(__file__))), '..', 'ttLib', 'testdata', 'TestTTF-Regular.ttx') self.font.importXML(ttx_path, quiet=True) self.pen = TTGlyphPen(self.font.getGlyphSet()) def test_drawGlyphsUnchanged(self): glyphSet = self.font.getGlyphSet() glyfTable = self.font['glyf'] for name in self.font.getGlyphOrder(): oldGlyph = glyphSet[name] oldGlyph.draw(self.pen) oldGlyph = oldGlyph._glyph newGlyph = self.pen.glyph() newGlyph.recalcBounds(glyfTable) if hasattr(oldGlyph, 'program'): newGlyph.program = oldGlyph.program self.assertEqual( oldGlyph.compile(glyfTable), newGlyph.compile(glyfTable)) if __name__ == '__main__': unittest.main()
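Annotation: the test round-trips glyphs through the pen protocol. For reference, driving TTGlyphPen directly looks like this (a minimal sketch; the glyphSet argument is only needed when drawing components, so None suffices for plain contours):

from fontTools.pens.ttGlyphPen import TTGlyphPen

pen = TTGlyphPen(None)
pen.moveTo((0, 0))
pen.lineTo((0, 700))
pen.lineTo((500, 700))
pen.lineTo((500, 0))
pen.closePath()
glyph = pen.glyph()  # a glyf-table Glyph, ready to insert into a TTFont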
7b5625a722a9b3e69636ffe3a89b9d314a1ce8e3
netbox/extras/management/commands/clearcache.py
netbox/extras/management/commands/clearcache.py
from django.core.cache import cache from django.core.management.base import BaseCommand class Command(BaseCommand): """Command to clear the entire cache.""" help = 'Clears the cache.' def handle(self, *args, **kwargs): cache.clear() self.stdout.write('Cache has been cleared.', ending="\n")
Add management command for clearing cache
Add management command for clearing cache
Python
apache-2.0
digitalocean/netbox,digitalocean/netbox,digitalocean/netbox,digitalocean/netbox
Add management command for clearing cache
from django.core.cache import cache from django.core.management.base import BaseCommand class Command(BaseCommand): """Command to clear the entire cache.""" help = 'Clears the cache.' def handle(self, *args, **kwargs): cache.clear() self.stdout.write('Cache has been cleared.', ending="\n")
<commit_before><commit_msg>Add management command for clearing cache<commit_after>
from django.core.cache import cache from django.core.management.base import BaseCommand class Command(BaseCommand): """Command to clear the entire cache.""" help = 'Clears the cache.' def handle(self, *args, **kwargs): cache.clear() self.stdout.write('Cache has been cleared.', ending="\n")
Add management command for clearing cachefrom django.core.cache import cache from django.core.management.base import BaseCommand class Command(BaseCommand): """Command to clear the entire cache.""" help = 'Clears the cache.' def handle(self, *args, **kwargs): cache.clear() self.stdout.write('Cache has been cleared.', ending="\n")
<commit_before><commit_msg>Add management command for clearing cache<commit_after>from django.core.cache import cache from django.core.management.base import BaseCommand class Command(BaseCommand): """Command to clear the entire cache.""" help = 'Clears the cache.' def handle(self, *args, **kwargs): cache.clear() self.stdout.write('Cache has been cleared.', ending="\n")
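Annotation: Django discovers this class because the module lives under <app>/management/commands/, and the file name becomes the command name. Invocation, from a shell or from code:

# shell:
#   python manage.py clearcache
from django.core.management import call_command

call_command('clearcache')  # prints: Cache has been cleared.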
10070529a5b5954095deca0a7653ab46c83d10c4
numba/typesystem/exttypes/vtabtype.py
numba/typesystem/exttypes/vtabtype.py
# -*- coding: utf-8 -*-

"""
Virtual method table types and ordering.
"""

from numba import error
from numba.typesystem import *

#------------------------------------------------------------------------
# Virtual Method Ordering
#------------------------------------------------------------------------

def unordered(parent_vtables, methoddict):
    # Method names in no particular order; create_method_ordering stores
    # names, matching what extending() returns
    return methoddict.keys()

def extending(parent_vtables, methoddict):
    """
    Order the virtual methods according to the given parent vtables,
    i.e. we can only extend existing vtables.
    """
    if not parent_vtables:
        return unordered(parent_vtables, methoddict)

    parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict))
    biggest_vtab = parents[-1]

    appending_methods = set(methoddict) - set(biggest_vtab.methodnames)
    return biggest_vtab.methodnames + list(appending_methods)

# ______________________________________________________________________
# Validate Virtual Method Order

def validate_vtab_compatibility(parent_vtables, vtab):
    parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict))
    vtabs = parents + [vtab]

    for vtab_smaller, vtab_bigger in zip(vtabs, vtabs[1:]):
        names1 = vtab_smaller.methodnames
        names2 = vtab_bigger.methodnames[:len(vtab_smaller.methodnames)]

        if names1 != names2:
            raise error.NumbaError(
                "Cannot create compatible method ordering for "
                "base classes '%s' and '%s'" % (
                    vtab_smaller.py_class.__name__,
                    vtab_bigger.py_class.__name__))

#------------------------------------------------------------------------
# Virtual Method Table Type
#------------------------------------------------------------------------

class VTabType(NumbaType):
    """
    Virtual method table type.
    """

    def __init__(self, py_class, parents):
        self.py_class = py_class

        # List of parent vtable types
        self.parents = parents

        # method_name -> Method
        self.methoddict = {}

        # Set once create_method_ordering is called,
        # list of ordered method names
        self.methodnames = None

    def create_method_ordering(self, ordering=unordered):
        """
        Create a consistent method ordering with the base types.

            ordering ∈ { unordered, extending, ... }
        """
        self.methodnames = ordering(self.parents, self.methoddict)

    def add_method(self, method):
        """
        Add a method to the vtab type and verify it with any parent
        method signatures.
        """
        if method.name in self.methoddict:
            # Patch current signature after type inference
            signature = self.get_signature(method.name)
            assert method.signature.args == signature.args
            if signature.return_type is None:
                signature.return_type = method.signature.return_type
            else:
                assert signature.return_type == method.signature.return_type, \
                    method.signature

        self.methoddict[method.name] = method

    def get_signature(self, method_name):
        "Get the signature for the given method name. Returns ExtMethodType"
        method = self.methoddict[method_name]
        return method.signature
Add virtual method table type and ordering
Add virtual method table type and ordering
Python
bsd-2-clause
shiquanwang/numba,sklam/numba,IntelLabs/numba,stefanseefeld/numba,GaZ3ll3/numba,ssarangi/numba,sklam/numba,stonebig/numba,sklam/numba,numba/numba,seibert/numba,pitrou/numba,numba/numba,ssarangi/numba,pitrou/numba,jriehl/numba,numba/numba,seibert/numba,stonebig/numba,pombredanne/numba,cpcloud/numba,seibert/numba,gmarkall/numba,gmarkall/numba,seibert/numba,stonebig/numba,stuartarchibald/numba,pitrou/numba,cpcloud/numba,pombredanne/numba,gmarkall/numba,gdementen/numba,stefanseefeld/numba,jriehl/numba,IntelLabs/numba,jriehl/numba,stuartarchibald/numba,stuartarchibald/numba,stonebig/numba,gdementen/numba,seibert/numba,sklam/numba,pombredanne/numba,GaZ3ll3/numba,cpcloud/numba,stuartarchibald/numba,gdementen/numba,gdementen/numba,stefanseefeld/numba,GaZ3ll3/numba,numba/numba,pombredanne/numba,stuartarchibald/numba,shiquanwang/numba,cpcloud/numba,pombredanne/numba,shiquanwang/numba,stefanseefeld/numba,IntelLabs/numba,stonebig/numba,gmarkall/numba,gdementen/numba,gmarkall/numba,jriehl/numba,IntelLabs/numba,pitrou/numba,ssarangi/numba,jriehl/numba,ssarangi/numba,numba/numba,GaZ3ll3/numba,pitrou/numba,stefanseefeld/numba,GaZ3ll3/numba,IntelLabs/numba,sklam/numba,cpcloud/numba,ssarangi/numba
Add virtual method table type and ordering
# -*- coding: utf-8 -*- """ Virtual method table types and ordering. """ from numba import error from numba.typesystem import * #------------------------------------------------------------------------ # Virtual Method Ordering #------------------------------------------------------------------------ def unordered(parent_vtables, methoddict): return methoddict.itervalues() def extending(parent_vtables, methoddict): """ Order the virtual methods according to the given parent vtables, i.e. we can only extend existing vtables. """ if not parent_vtables: return unordered(parent_vtables, methoddict) parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) biggest_vtab = parents[-1] appending_methods = set(methoddict) - set(biggest_vtab.methodnames) return biggest_vtab.methodnames + list(appending_methods) # ______________________________________________________________________ # Validate Virtual Method Order def validate_vtab_compatibility(parent_vtables, vtab): parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) vtabs = parents + [vtab] for vtab_smaller, vtab_bigger in zip(vtabs, vtabs[1:]): names1 = vtab_smaller.methodnames names2 = vtab_bigger.methodnames[len(vtab_smaller.methodnames)] if names1 != names2: raise error.NumbaError( "Cannot create compatible method ordering for " "base classes '%s' and '%s'" % ( vtab_smaller.py_class.__name__, vtab_bigger.py_class.__name__)) #------------------------------------------------------------------------ # Virtual Method Table Type #------------------------------------------------------------------------ class VTabType(NumbaType): """ Virtual method table type. """ def __init__(self, py_class, parents): self.py_class = py_class # List of parent vtable types self.parents = parents # method_name -> Method self.methoddict = {} # Set once create_method_ordering is called, # list of ordered method names self.methodnames = None def create_method_ordering(self, ordering=unordered): """ Create a consistent method ordering with the base types. ordering ∈ { unordered, extending, ... } """ self.methodnames = ordering(self.parents, self.methoddict) def add_method(self, method): """ Add a method to the vtab type and verify it with any parent method signatures. """ if method.name in self.methoddict: # Patch current signature after type inference signature = self.get_signature(method.name) assert method.signature.args == signature.args if signature.return_type is None: signature.return_type = method.signature.return_type else: assert signature.return_type == method.signature.return_type, \ method.signature self.methoddict[method.name] = method def get_signature(self, method_name): "Get the signature for the given method name. Returns ExtMethodType" method = self.methoddict[method_name] return method.signature
<commit_before><commit_msg>Add virtual method table type and ordering<commit_after>
# -*- coding: utf-8 -*- """ Virtual method table types and ordering. """ from numba import error from numba.typesystem import * #------------------------------------------------------------------------ # Virtual Method Ordering #------------------------------------------------------------------------ def unordered(parent_vtables, methoddict): return methoddict.itervalues() def extending(parent_vtables, methoddict): """ Order the virtual methods according to the given parent vtables, i.e. we can only extend existing vtables. """ if not parent_vtables: return unordered(parent_vtables, methoddict) parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) biggest_vtab = parents[-1] appending_methods = set(methoddict) - set(biggest_vtab.methodnames) return biggest_vtab.methodnames + list(appending_methods) # ______________________________________________________________________ # Validate Virtual Method Order def validate_vtab_compatibility(parent_vtables, vtab): parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) vtabs = parents + [vtab] for vtab_smaller, vtab_bigger in zip(vtabs, vtabs[1:]): names1 = vtab_smaller.methodnames names2 = vtab_bigger.methodnames[len(vtab_smaller.methodnames)] if names1 != names2: raise error.NumbaError( "Cannot create compatible method ordering for " "base classes '%s' and '%s'" % ( vtab_smaller.py_class.__name__, vtab_bigger.py_class.__name__)) #------------------------------------------------------------------------ # Virtual Method Table Type #------------------------------------------------------------------------ class VTabType(NumbaType): """ Virtual method table type. """ def __init__(self, py_class, parents): self.py_class = py_class # List of parent vtable types self.parents = parents # method_name -> Method self.methoddict = {} # Set once create_method_ordering is called, # list of ordered method names self.methodnames = None def create_method_ordering(self, ordering=unordered): """ Create a consistent method ordering with the base types. ordering ∈ { unordered, extending, ... } """ self.methodnames = ordering(self.parents, self.methoddict) def add_method(self, method): """ Add a method to the vtab type and verify it with any parent method signatures. """ if method.name in self.methoddict: # Patch current signature after type inference signature = self.get_signature(method.name) assert method.signature.args == signature.args if signature.return_type is None: signature.return_type = method.signature.return_type else: assert signature.return_type == method.signature.return_type, \ method.signature self.methoddict[method.name] = method def get_signature(self, method_name): "Get the signature for the given method name. Returns ExtMethodType" method = self.methoddict[method_name] return method.signature
Add virtual method table type and ordering
# -*- coding: utf-8 -*-
"""
Virtual method table types and ordering.
"""

from numba import error
from numba.typesystem import *

#------------------------------------------------------------------------
# Virtual Method Ordering
#------------------------------------------------------------------------

def unordered(parent_vtables, methoddict):
    return methoddict.itervalues()

def extending(parent_vtables, methoddict):
    """
    Order the virtual methods according to the given parent vtables,
    i.e. we can only extend existing vtables.
    """
    if not parent_vtables:
        return unordered(parent_vtables, methoddict)

    parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict))
    biggest_vtab = parents[-1]
    appending_methods = set(methoddict) - set(biggest_vtab.methodnames)
    return biggest_vtab.methodnames + list(appending_methods)

# ______________________________________________________________________
# Validate Virtual Method Order

def validate_vtab_compatibility(parent_vtables, vtab):
    parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict))
    vtabs = parents + [vtab]
    for vtab_smaller, vtab_bigger in zip(vtabs, vtabs[1:]):
        names1 = vtab_smaller.methodnames
        names2 = vtab_bigger.methodnames[len(vtab_smaller.methodnames)]
        if names1 != names2:
            raise error.NumbaError(
                "Cannot create compatible method ordering for "
                "base classes '%s' and '%s'" % (
                    vtab_smaller.py_class.__name__,
                    vtab_bigger.py_class.__name__))

#------------------------------------------------------------------------
# Virtual Method Table Type
#------------------------------------------------------------------------

class VTabType(NumbaType):
    """
    Virtual method table type.
    """

    def __init__(self, py_class, parents):
        self.py_class = py_class

        # List of parent vtable types
        self.parents = parents

        # method_name -> Method
        self.methoddict = {}

        # Set once create_method_ordering is called,
        # list of ordered method names
        self.methodnames = None

    def create_method_ordering(self, ordering=unordered):
        """
        Create a consistent method ordering with the base types.

            ordering ∈ { unordered, extending, ... }
        """
        self.methodnames = ordering(self.parents, self.methoddict)

    def add_method(self, method):
        """
        Add a method to the vtab type and verify it with any parent
        method signatures.
        """
        if method.name in self.methoddict:
            # Patch current signature after type inference
            signature = self.get_signature(method.name)
            assert method.signature.args == signature.args
            if signature.return_type is None:
                signature.return_type = method.signature.return_type
            else:
                assert signature.return_type == method.signature.return_type, \
                    method.signature

        self.methoddict[method.name] = method

    def get_signature(self, method_name):
        "Get the signature for the given method name. Returns ExtMethodType"
        method = self.methoddict[method_name]
        return method.signature
<commit_before><commit_msg>Add virtual method table type and ordering<commit_after># -*- coding: utf-8 -*- """ Virtual method table types and ordering. """ from numba import error from numba.typesystem import * #------------------------------------------------------------------------ # Virtual Method Ordering #------------------------------------------------------------------------ def unordered(parent_vtables, methoddict): return methoddict.itervalues() def extending(parent_vtables, methoddict): """ Order the virtual methods according to the given parent vtables, i.e. we can only extend existing vtables. """ if not parent_vtables: return unordered(parent_vtables, methoddict) parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) biggest_vtab = parents[-1] appending_methods = set(methoddict) - set(biggest_vtab.methodnames) return biggest_vtab.methodnames + list(appending_methods) # ______________________________________________________________________ # Validate Virtual Method Order def validate_vtab_compatibility(parent_vtables, vtab): parents = sorted(parent_vtables, key=lambda vtab: len(vtab.methoddict)) vtabs = parents + [vtab] for vtab_smaller, vtab_bigger in zip(vtabs, vtabs[1:]): names1 = vtab_smaller.methodnames names2 = vtab_bigger.methodnames[len(vtab_smaller.methodnames)] if names1 != names2: raise error.NumbaError( "Cannot create compatible method ordering for " "base classes '%s' and '%s'" % ( vtab_smaller.py_class.__name__, vtab_bigger.py_class.__name__)) #------------------------------------------------------------------------ # Virtual Method Table Type #------------------------------------------------------------------------ class VTabType(NumbaType): """ Virtual method table type. """ def __init__(self, py_class, parents): self.py_class = py_class # List of parent vtable types self.parents = parents # method_name -> Method self.methoddict = {} # Set once create_method_ordering is called, # list of ordered method names self.methodnames = None def create_method_ordering(self, ordering=unordered): """ Create a consistent method ordering with the base types. ordering ∈ { unordered, extending, ... } """ self.methodnames = ordering(self.parents, self.methoddict) def add_method(self, method): """ Add a method to the vtab type and verify it with any parent method signatures. """ if method.name in self.methoddict: # Patch current signature after type inference signature = self.get_signature(method.name) assert method.signature.args == signature.args if signature.return_type is None: signature.return_type = method.signature.return_type else: assert signature.return_type == method.signature.return_type, \ method.signature self.methoddict[method.name] = method def get_signature(self, method_name): "Get the signature for the given method name. Returns ExtMethodType" method = self.methoddict[method_name] return method.signature
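A rough illustration of the extending() ordering logic from this record, using plain stand-ins instead of numba's Method/NumbaType objects (SimpleVtab and its fields are invented for the sketch):

# Stand-in exposing the two attributes extending() reads.
class SimpleVtab:
    def __init__(self, methodnames):
        self.methodnames = methodnames
        self.methoddict = {name: object() for name in methodnames}

parent = SimpleVtab(["foo", "bar"])
child_methods = {"bar": object(), "baz": object()}

# Parent slots keep their positions; genuinely new methods are appended.
appended = set(child_methods) - set(parent.methodnames)
print(parent.methodnames + list(appended))  # ['foo', 'bar', 'baz']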
faea42e32ddcaaa398214d6775fc521c67a3a0f8
dev-tools/get-bwc-version.py
dev-tools/get-bwc-version.py
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

'''
Downloads and extracts elasticsearch for backwards compatibility tests.
'''

import argparse
import os
import platform
import shutil
import subprocess
import urllib.request
import zipfile

def parse_config():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--path', metavar='DIR', default='./backwards',
                        help='Where to extract elasticsearch')
    parser.add_argument('--force', action='store_true', default=False,
                        help='Delete and redownload if the version already exists')
    parser.add_argument('version', metavar='X.Y.Z',
                        help='Version of elasticsearch to grab')
    return parser.parse_args()

def main():
    c = parse_config()

    if not os.path.exists(c.path):
        print('Creating %s' % c.path)
        os.mkdir(c.path)

    is_windows = platform.system() == 'Windows'

    os.chdir(c.path)
    version_dir = 'elasticsearch-%s' % c.version
    if os.path.exists(version_dir):
        if c.force:
            print('Removing old download %s' % version_dir)
            shutil.rmtree(version_dir)
        else:
            print('Version %s exists at %s' % (c.version, version_dir))
            return

    # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts
    if is_windows:
        filename = '%s.zip' % version_dir
    else:
        filename = '%s.tar.gz' % version_dir

    url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
    print('Downloading %s' % url)
    urllib.request.urlretrieve(url, filename)

    print('Extracting to %s' % version_dir)
    if is_windows:
        archive = zipfile.ZipFile(filename)
        archive.extractall()
    else:
        # for some reason python's tarfile module has trouble with ES tgz?
        subprocess.check_call('tar -xzf %s' % filename, shell=True)

    print('Cleaning up %s' % filename)
    os.remove(filename)

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Ctrl-C caught, exiting')
Add script to grab ES version for BWC tests.
Tools: Add script to grab ES version for BWC tests. closes #7653
Python
apache-2.0
fubuki/elasticsearch,fubuki/elasticsearch,aparo/elasticsearch,fubuki/elasticsearch,fubuki/elasticsearch,aparo/elasticsearch,fubuki/elasticsearch,aparo/elasticsearch,fubuki/elasticsearch,aparo/elasticsearch,aparo/elasticsearch,aparo/elasticsearch
Tools: Add script to grab ES version for BWC tests. closes #7653
# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. ''' Downloads and extracts elasticsearch for backwards compatibility tests. ''' import argparse import os import platform import shutil import subprocess import urllib.request import zipfile def parse_config(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--path', metavar='DIR', default='./backwards', help='Where to extract elasticsearch') parser.add_argument('--force', action='store_true', default=False, help='Delete and redownload if the version already exists') parser.add_argument('version', metavar='X.Y.Z', help='Version of elasticsearch to grab') return parser.parse_args() def main(): c = parse_config() if not os.path.exists(c.path): print('Creating %s' % c.path) os.mkdir(c.path) is_windows = platform.system() == 'Windows' os.chdir(c.path) version_dir = 'elasticsearch-%s' % c.version if os.path.exists(version_dir): if c.force: print('Removing old download %s' % version_dir) shutil.rmtree(version_dir) else: print('Version %s exists at %s' % (c.version, version_dir)) return # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts if is_windows: filename = '%s.zip' % version_dir else: filename = '%s.tar.gz' % version_dir url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename print('Downloading %s' % url) urllib.request.urlretrieve(url, filename) print('Extracting to %s' % version_dir) if is_windows: archive = zipfile.ZipFile(filename) archive.extractall() else: # for some reason python's tarfile module has trouble with ES tgz? subprocess.check_call('tar -xzf %s' % filename, shell=True) print('Cleaning up %s' % filename) os.remove(filename) if __name__ == '__main__': try: main() except KeyboardInterrupt: print('Ctrl-C caught, exiting')
<commit_before><commit_msg>Tools: Add script to grab ES version for BWC tests. closes #7653<commit_after>
# Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. ''' Downloads and extracts elasticsearch for backwards compatibility tests. ''' import argparse import os import platform import shutil import subprocess import urllib.request import zipfile def parse_config(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--path', metavar='DIR', default='./backwards', help='Where to extract elasticsearch') parser.add_argument('--force', action='store_true', default=False, help='Delete and redownload if the version already exists') parser.add_argument('version', metavar='X.Y.Z', help='Version of elasticsearch to grab') return parser.parse_args() def main(): c = parse_config() if not os.path.exists(c.path): print('Creating %s' % c.path) os.mkdir(c.path) is_windows = platform.system() == 'Windows' os.chdir(c.path) version_dir = 'elasticsearch-%s' % c.version if os.path.exists(version_dir): if c.force: print('Removing old download %s' % version_dir) shutil.rmtree(version_dir) else: print('Version %s exists at %s' % (c.version, version_dir)) return # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts if is_windows: filename = '%s.zip' % version_dir else: filename = '%s.tar.gz' % version_dir url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename print('Downloading %s' % url) urllib.request.urlretrieve(url, filename) print('Extracting to %s' % version_dir) if is_windows: archive = zipfile.ZipFile(filename) archive.extractall() else: # for some reason python's tarfile module has trouble with ES tgz? subprocess.check_call('tar -xzf %s' % filename, shell=True) print('Cleaning up %s' % filename) os.remove(filename) if __name__ == '__main__': try: main() except KeyboardInterrupt: print('Ctrl-C caught, exiting')
Tools: Add script to grab ES version for BWC tests.

closes #7653
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.

'''
Downloads and extracts elasticsearch for backwards compatibility tests.
'''

import argparse
import os
import platform
import shutil
import subprocess
import urllib.request
import zipfile

def parse_config():
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--path', metavar='DIR', default='./backwards',
                        help='Where to extract elasticsearch')
    parser.add_argument('--force', action='store_true', default=False,
                        help='Delete and redownload if the version already exists')
    parser.add_argument('version', metavar='X.Y.Z',
                        help='Version of elasticsearch to grab')
    return parser.parse_args()

def main():
    c = parse_config()

    if not os.path.exists(c.path):
        print('Creating %s' % c.path)
        os.mkdir(c.path)

    is_windows = platform.system() == 'Windows'

    os.chdir(c.path)
    version_dir = 'elasticsearch-%s' % c.version
    if os.path.exists(version_dir):
        if c.force:
            print('Removing old download %s' % version_dir)
            shutil.rmtree(version_dir)
        else:
            print('Version %s exists at %s' % (c.version, version_dir))
            return

    # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts
    if is_windows:
        filename = '%s.zip' % version_dir
    else:
        filename = '%s.tar.gz' % version_dir

    url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename
    print('Downloading %s' % url)
    urllib.request.urlretrieve(url, filename)

    print('Extracting to %s' % version_dir)
    if is_windows:
        archive = zipfile.ZipFile(filename)
        archive.extractall()
    else:
        # for some reason python's tarfile module has trouble with ES tgz?
        subprocess.check_call('tar -xzf %s' % filename, shell=True)

    print('Cleaning up %s' % filename)
    os.remove(filename)

if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        print('Ctrl-C caught, exiting')
<commit_before><commit_msg>Tools: Add script to grab ES version for BWC tests. closes #7653<commit_after># Licensed to Elasticsearch under one or more contributor # license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright # ownership. Elasticsearch licenses this file to you under # the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on # an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. ''' Downloads and extracts elasticsearch for backwards compatibility tests. ''' import argparse import os import platform import shutil import subprocess import urllib.request import zipfile def parse_config(): parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('--path', metavar='DIR', default='./backwards', help='Where to extract elasticsearch') parser.add_argument('--force', action='store_true', default=False, help='Delete and redownload if the version already exists') parser.add_argument('version', metavar='X.Y.Z', help='Version of elasticsearch to grab') return parser.parse_args() def main(): c = parse_config() if not os.path.exists(c.path): print('Creating %s' % c.path) os.mkdir(c.path) is_windows = platform.system() == 'Windows' os.chdir(c.path) version_dir = 'elasticsearch-%s' % c.version if os.path.exists(version_dir): if c.force: print('Removing old download %s' % version_dir) shutil.rmtree(version_dir) else: print('Version %s exists at %s' % (c.version, version_dir)) return # before 1.4.0, the zip file contains windows scripts, and tar.gz contained *nix scripts if is_windows: filename = '%s.zip' % version_dir else: filename = '%s.tar.gz' % version_dir url = 'https://download.elasticsearch.org/elasticsearch/elasticsearch/%s' % filename print('Downloading %s' % url) urllib.request.urlretrieve(url, filename) print('Extracting to %s' % version_dir) if is_windows: archive = zipfile.ZipFile(filename) archive.extractall() else: # for some reason python's tarfile module has trouble with ES tgz? subprocess.check_call('tar -xzf %s' % filename, shell=True) print('Cleaning up %s' % filename) os.remove(filename) if __name__ == '__main__': try: main() except KeyboardInterrupt: print('Ctrl-C caught, exiting')
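The script is driven from the command line; a hedged sketch of a typical call (the version string and working directory are illustrative, not values the record guarantees):

# Shell form, mirroring parse_config():
#   python dev-tools/get-bwc-version.py --path ./backwards 1.3.2
# Programmatic equivalent:
import subprocess, sys
subprocess.check_call(
    [sys.executable, "dev-tools/get-bwc-version.py", "--path", "./backwards", "1.3.2"]
)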
047e7c017f40b7c23b80151db137ed73230911ce
Proxmox/commands.py
Proxmox/commands.py
from django.http import HttpResponse

def runcommand(request, proxmox):
    command = request.GET.get('command', 'none')

    if command == 'addcomplete':
        nodeLocation = request.POST.get('node', '')
        vmid = request.POST.get('vmid', '')
        template = request.POST.get('template', '')
        hostname = request.POST.get('hostname', '')
        storage = request.POST.get('storage', 'local')
        memory = request.POST.get('memory', '')
        swap = request.POST.get('swap', '')
        cpus = request.POST.get('cpu', '')
        disk = request.POST.get('disk', '')
        password = request.POST.get('serverpw', '')
        ipaddress = request.POST.get('ipaddress', '')

        if vmid == '':
            vmid = 100
            ids = []
            for node in proxmox.nodes.get():
                for vm in proxmox.nodes(node['node']).openvz.get():
                    ids.append(int(vm['vmid']))
                for vm in proxmox.nodes(node['node']).qemu.get():
                    ids.append(int(vm['vmid']))
            while (vmid in ids):
                vmid = vmid + 1

        node = proxmox.nodes(nodeLocation)
        if node != None:
            node.openvz.create(vmid=int(vmid),
                               ostemplate=str(template),
                               hostname=str(hostname),
                               storage=str(storage),
                               memory=int(memory),
                               swap=int(swap),
                               cpus=int(cpus),
                               disk=int(disk),
                               password=str(password),
                               ip_address=str(ipaddress))
        else:
            return HttpResponse("Node Not Found", status=400)

    return HttpResponse("Ok")
Add Server wizard now working
Add Server wizard now working
Python
mit
Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation,Tomcuzz/OctaHomeAutomation
Add Server wizard now working
from django.http import HttpResponse def runcommand(request, proxmox): command = request.GET.get('command', 'none') if command == 'addcomplete': nodeLocation = request.POST.get('node', '') vmid = request.POST.get('vmid', '') template = request.POST.get('template', '') hostname = request.POST.get('hostname', '') storage = request.POST.get('storage', 'local') memory = request.POST.get('memory', '') swap = request.POST.get('swap', '') cpus = request.POST.get('cpu', '') disk = request.POST.get('disk', '') password = request.POST.get('serverpw', '') ipaddress = request.POST.get('ipaddress', '') if vmid == '': vmid = 100 ids = [] for node in proxmox.nodes.get(): for vm in proxmox.nodes(node['node']).openvz.get(): ids.append(int(vm['vmid'])) for vm in proxmox.nodes(node['node']).qemu.get(): ids.append(int(vm['vmid'])) while (vmid in ids): vmid = vmid + 1 node = proxmox.nodes(nodeLocation) if node != None: node.openvz.create(vmid=int(vmid), ostemplate=str(template), hostname=str(hostname), storage=str(storage), memory=int(memory), swap=int(swap), cpus=int(cpus), disk=int(disk), password=str(password), ip_address=str(ipaddress)) else: return HttpResponse("Node Not Found", status=400) return HttpResponse("Ok")
<commit_before><commit_msg>Add Server wizard now working<commit_after>
from django.http import HttpResponse def runcommand(request, proxmox): command = request.GET.get('command', 'none') if command == 'addcomplete': nodeLocation = request.POST.get('node', '') vmid = request.POST.get('vmid', '') template = request.POST.get('template', '') hostname = request.POST.get('hostname', '') storage = request.POST.get('storage', 'local') memory = request.POST.get('memory', '') swap = request.POST.get('swap', '') cpus = request.POST.get('cpu', '') disk = request.POST.get('disk', '') password = request.POST.get('serverpw', '') ipaddress = request.POST.get('ipaddress', '') if vmid == '': vmid = 100 ids = [] for node in proxmox.nodes.get(): for vm in proxmox.nodes(node['node']).openvz.get(): ids.append(int(vm['vmid'])) for vm in proxmox.nodes(node['node']).qemu.get(): ids.append(int(vm['vmid'])) while (vmid in ids): vmid = vmid + 1 node = proxmox.nodes(nodeLocation) if node != None: node.openvz.create(vmid=int(vmid), ostemplate=str(template), hostname=str(hostname), storage=str(storage), memory=int(memory), swap=int(swap), cpus=int(cpus), disk=int(disk), password=str(password), ip_address=str(ipaddress)) else: return HttpResponse("Node Not Found", status=400) return HttpResponse("Ok")
Add Server wizard now working
from django.http import HttpResponse

def runcommand(request, proxmox):
    command = request.GET.get('command', 'none')

    if command == 'addcomplete':
        nodeLocation = request.POST.get('node', '')
        vmid = request.POST.get('vmid', '')
        template = request.POST.get('template', '')
        hostname = request.POST.get('hostname', '')
        storage = request.POST.get('storage', 'local')
        memory = request.POST.get('memory', '')
        swap = request.POST.get('swap', '')
        cpus = request.POST.get('cpu', '')
        disk = request.POST.get('disk', '')
        password = request.POST.get('serverpw', '')
        ipaddress = request.POST.get('ipaddress', '')

        if vmid == '':
            vmid = 100
            ids = []
            for node in proxmox.nodes.get():
                for vm in proxmox.nodes(node['node']).openvz.get():
                    ids.append(int(vm['vmid']))
                for vm in proxmox.nodes(node['node']).qemu.get():
                    ids.append(int(vm['vmid']))
            while (vmid in ids):
                vmid = vmid + 1

        node = proxmox.nodes(nodeLocation)
        if node != None:
            node.openvz.create(vmid=int(vmid),
                               ostemplate=str(template),
                               hostname=str(hostname),
                               storage=str(storage),
                               memory=int(memory),
                               swap=int(swap),
                               cpus=int(cpus),
                               disk=int(disk),
                               password=str(password),
                               ip_address=str(ipaddress))
        else:
            return HttpResponse("Node Not Found", status=400)

    return HttpResponse("Ok")
<commit_before><commit_msg>Add Server wizard now working<commit_after>from django.http import HttpResponse def runcommand(request, proxmox): command = request.GET.get('command', 'none') if command == 'addcomplete': nodeLocation = request.POST.get('node', '') vmid = request.POST.get('vmid', '') template = request.POST.get('template', '') hostname = request.POST.get('hostname', '') storage = request.POST.get('storage', 'local') memory = request.POST.get('memory', '') swap = request.POST.get('swap', '') cpus = request.POST.get('cpu', '') disk = request.POST.get('disk', '') password = request.POST.get('serverpw', '') ipaddress = request.POST.get('ipaddress', '') if vmid == '': vmid = 100 ids = [] for node in proxmox.nodes.get(): for vm in proxmox.nodes(node['node']).openvz.get(): ids.append(int(vm['vmid'])) for vm in proxmox.nodes(node['node']).qemu.get(): ids.append(int(vm['vmid'])) while (vmid in ids): vmid = vmid + 1 node = proxmox.nodes(nodeLocation) if node != None: node.openvz.create(vmid=int(vmid), ostemplate=str(template), hostname=str(hostname), storage=str(storage), memory=int(memory), swap=int(swap), cpus=int(cpus), disk=int(disk), password=str(password), ip_address=str(ipaddress)) else: return HttpResponse("Node Not Found", status=400) return HttpResponse("Ok")
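The view receives an already-authenticated client as its proxmox argument; the call style matches the proxmoxer library, so the wiring presumably looks roughly like this (host and credentials are placeholders, and the exact constructor options should be checked against proxmoxer's docs):

from proxmoxer import ProxmoxAPI  # assumed dependency; not shown in the record

proxmox = ProxmoxAPI("pve.example.com", user="root@pam",
                     password="secret", verify_ssl=False)
# runcommand(request, proxmox) can then enumerate nodes and create containers.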
420ec092626d72ea465ee33115ff6635b39f0cd3
src/julia/tests/test_runtests.py
src/julia/tests/test_runtests.py
import subprocess
import sys
from textwrap import dedent

from julia.core import _enviorn

from .test_compatible_exe import run


def test_runtests_failure(tmp_path):
    testfile = tmp_path / "test.py"
    testcode = u"""
    def test_THIS_TEST_MUST_FAIL():
        assert False
    """
    testfile.write_text(dedent(testcode))

    proc = run(
        [
            sys.executable,
            "-m",
            "julia.runtests",
            "--",
            str(testfile),
            "--no-julia",
            "-k",
            "test_THIS_TEST_MUST_FAIL",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        env=_enviorn,
    )
    assert proc.returncode == 1
    assert "1 failed" in proc.stdout
Test julia.runtests can fail properly
Test julia.runtests can fail properly
Python
mit
JuliaPy/pyjulia,JuliaLang/pyjulia,JuliaPy/pyjulia
Test julia.runtests can fail properly
import subprocess import sys from textwrap import dedent from julia.core import _enviorn from .test_compatible_exe import run def test_runtests_failure(tmp_path): testfile = tmp_path / "test.py" testcode = u""" def test_THIS_TEST_MUST_FAIL(): assert False """ testfile.write_text(dedent(testcode)) proc = run( [ sys.executable, "-m", "julia.runtests", "--", str(testfile), "--no-julia", "-k", "test_THIS_TEST_MUST_FAIL", ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=_enviorn, ) assert proc.returncode == 1 assert "1 failed" in proc.stdout
<commit_before><commit_msg>Test julia.runtests can fail properly<commit_after>
import subprocess import sys from textwrap import dedent from julia.core import _enviorn from .test_compatible_exe import run def test_runtests_failure(tmp_path): testfile = tmp_path / "test.py" testcode = u""" def test_THIS_TEST_MUST_FAIL(): assert False """ testfile.write_text(dedent(testcode)) proc = run( [ sys.executable, "-m", "julia.runtests", "--", str(testfile), "--no-julia", "-k", "test_THIS_TEST_MUST_FAIL", ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=_enviorn, ) assert proc.returncode == 1 assert "1 failed" in proc.stdout
Test julia.runtests can fail properly
import subprocess
import sys
from textwrap import dedent

from julia.core import _enviorn

from .test_compatible_exe import run


def test_runtests_failure(tmp_path):
    testfile = tmp_path / "test.py"
    testcode = u"""
    def test_THIS_TEST_MUST_FAIL():
        assert False
    """
    testfile.write_text(dedent(testcode))

    proc = run(
        [
            sys.executable,
            "-m",
            "julia.runtests",
            "--",
            str(testfile),
            "--no-julia",
            "-k",
            "test_THIS_TEST_MUST_FAIL",
        ],
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
        universal_newlines=True,
        env=_enviorn,
    )
    assert proc.returncode == 1
    assert "1 failed" in proc.stdout
<commit_before><commit_msg>Test julia.runtests can fail properly<commit_after>import subprocess import sys from textwrap import dedent from julia.core import _enviorn from .test_compatible_exe import run def test_runtests_failure(tmp_path): testfile = tmp_path / "test.py" testcode = u""" def test_THIS_TEST_MUST_FAIL(): assert False """ testfile.write_text(dedent(testcode)) proc = run( [ sys.executable, "-m", "julia.runtests", "--", str(testfile), "--no-julia", "-k", "test_THIS_TEST_MUST_FAIL", ], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, env=_enviorn, ) assert proc.returncode == 1 assert "1 failed" in proc.stdout
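The pattern here, running a child interpreter and asserting on its exit status, can be reproduced with the standard library alone; a minimal sketch without the julia-specific run() helper:

import subprocess, sys

proc = subprocess.run(
    [sys.executable, "-c", "import sys; sys.exit(1)"],
    stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True,
)
assert proc.returncode == 1  # the child's failure propagates via the exit code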
b7af7aa6cb15421b140adea41c4436d1960367f9
bin/validate_file.py
bin/validate_file.py
#!/usr/bin/env python3

import argparse
import validator.validator as vv

parser = argparse.ArgumentParser()
parser.add_argument('--config', help="configuration file for file validator")
parser.add_argument('--file', help="file to validate")
args = parser.parse_args()

v = vv.Validate()
v.validate_file(args.config, args.file)
Add validator scripts which use the validator class
Add validator scripts which use the validator class
Python
mit
iestynpryce/file-validator
Add validator scripts which use the validator class
#!/usr/bin/env python3 import argparse import validator.validator as vv parser = argparse.ArgumentParser() parser.add_argument('--config', help="configuration file for file validator") parser.add_argument('--file', help="file to validate") args = parser.parse_args() v = vv.Validate() v.validate_file(args.config, args.file)
<commit_before><commit_msg>Add validator scripts which use the validator class<commit_after>
#!/usr/bin/env python3 import argparse import validator.validator as vv parser = argparse.ArgumentParser() parser.add_argument('--config', help="configuration file for file validator") parser.add_argument('--file', help="file to validate") args = parser.parse_args() v = vv.Validate() v.validate_file(args.config, args.file)
Add validator scripts which use the validator class
#!/usr/bin/env python3

import argparse
import validator.validator as vv

parser = argparse.ArgumentParser()
parser.add_argument('--config', help="configuration file for file validator")
parser.add_argument('--file', help="file to validate")
args = parser.parse_args()

v = vv.Validate()
v.validate_file(args.config, args.file)
<commit_before><commit_msg>Add validator scripts which use the validator class<commit_after>#!/usr/bin/env python3 import argparse import validator.validator as vv parser = argparse.ArgumentParser() parser.add_argument('--config', help="configuration file for file validator") parser.add_argument('--file', help="file to validate") args = parser.parse_args() v = vv.Validate() v.validate_file(args.config, args.file)
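Usage is a plain argparse CLI; a sketch of the expected invocation (both file names are placeholders):

# Shell form:
#   ./bin/validate_file.py --config rules.yml --file data.csv
# The same parser exercised directly:
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config')
parser.add_argument('--file')
print(parser.parse_args(['--config', 'rules.yml', '--file', 'data.csv']))
# -> Namespace(config='rules.yml', file='data.csv')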
150248cbcdcc114f996ef6746e74f8cd8294190f
setup.py
setup.py
from setuptools import setup, find_packages
# import os, sys

def parse_requirements(requirements):
    # load from requirements.txt
    with open(requirements) as f:
        lines = [l for l in f]
        # remove spaces
        stripped = map((lambda x: x.strip()), lines)
        # remove comments
        nocomments = filter((lambda x: not x.startswith('#')), stripped)
        # remove empty lines
        reqs = filter((lambda x: x), nocomments)
        return reqs

PACKAGE_NAME = "dejavu"
PACKAGE_VERSION = "0.1"
SUMMARY = 'Dejavu Audio Fingerprinting'
DESCRIPTION = """Dejavu Audio Fingerprinting"""
REQUIREMENTS = parse_requirements("requirements.txt")

setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description=SUMMARY,
    long_description=DESCRIPTION,
    author='worldveil',
    author_email='will.drevo@gmail.com',
    url='http://github.com/tuxdna/dejavu',
    license='Apache 2.0',
    include_package_data=True,
    packages=find_packages(),
    platforms=['Any'],
    install_requires=REQUIREMENTS,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
from setuptools import setup, find_packages
# import os, sys

def parse_requirements(requirements):
    # load from requirements.txt
    with open(requirements) as f:
        lines = [l for l in f]
        # remove spaces
        stripped = map((lambda x: x.strip()), lines)
        # remove comments
        nocomments = filter((lambda x: not x.startswith('#')), stripped)
        # remove empty lines
        reqs = filter((lambda x: x), nocomments)
        return reqs

PACKAGE_NAME = "PyDejavu"
PACKAGE_VERSION = "0.1"
SUMMARY = 'Dejavu Audio Fingerprinting'
DESCRIPTION = """Dejavu Audio Fingerprinting"""
REQUIREMENTS = parse_requirements("requirements.txt")

setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description=SUMMARY,
    long_description=DESCRIPTION,
    author='worldveil',
    author_email='will.drevo@gmail.com',
    url='http://github.com/tuxdna/dejavu',
    license='Apache 2.0',
    include_package_data=True,
    packages=find_packages(),
    platforms=['Any'],
    install_requires=REQUIREMENTS,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
Update project name from dejavu to PyDejavu
Update project name from dejavu to PyDejavu
Python
mit
IskrenStanislavov/dejavu,worldveil/dejavu,shantanoo/dejavu,thanhquanky/dejavu,nicktimko/dejavu,VikramTiwari/dejavu,nmerad/dejavu,karthiks1995/dejavu,vasanthkalingeri/dejavu,snd/dejavu,davidmashburn/dejavu,midroid/dejavu,davidmashburn/dejavu,nmtoan91/dejavu,thanhquanky/dejavu,snd/dejavu,VikramTiwari/dejavu,karthiks1995/dejavu,nmtoan91/dejavu,IskrenStanislavov/dejavu,midroid/dejavu,pierater/dejavu,nicktimko/dejavu,pierater/dejavu,nmerad/dejavu,worldveil/dejavu,shantanoo/dejavu,vasanthkalingeri/dejavu
from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "dejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) Update project name from dejavu to PyDejavu
from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "PyDejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
<commit_before>from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "dejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) <commit_msg>Update project name from dejavu to PyDejavu<commit_after>
from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "PyDejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
from setuptools import setup, find_packages
# import os, sys

def parse_requirements(requirements):
    # load from requirements.txt
    with open(requirements) as f:
        lines = [l for l in f]
        # remove spaces
        stripped = map((lambda x: x.strip()), lines)
        # remove comments
        nocomments = filter((lambda x: not x.startswith('#')), stripped)
        # remove empty lines
        reqs = filter((lambda x: x), nocomments)
        return reqs

PACKAGE_NAME = "dejavu"
PACKAGE_VERSION = "0.1"
SUMMARY = 'Dejavu Audio Fingerprinting'
DESCRIPTION = """Dejavu Audio Fingerprinting"""
REQUIREMENTS = parse_requirements("requirements.txt")

setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description=SUMMARY,
    long_description=DESCRIPTION,
    author='worldveil',
    author_email='will.drevo@gmail.com',
    url='http://github.com/tuxdna/dejavu',
    license='Apache 2.0',
    include_package_data=True,
    packages=find_packages(),
    platforms=['Any'],
    install_requires=REQUIREMENTS,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
Update project name from dejavu to PyDejavu
from setuptools import setup, find_packages
# import os, sys

def parse_requirements(requirements):
    # load from requirements.txt
    with open(requirements) as f:
        lines = [l for l in f]
        # remove spaces
        stripped = map((lambda x: x.strip()), lines)
        # remove comments
        nocomments = filter((lambda x: not x.startswith('#')), stripped)
        # remove empty lines
        reqs = filter((lambda x: x), nocomments)
        return reqs

PACKAGE_NAME = "PyDejavu"
PACKAGE_VERSION = "0.1"
SUMMARY = 'Dejavu Audio Fingerprinting'
DESCRIPTION = """Dejavu Audio Fingerprinting"""
REQUIREMENTS = parse_requirements("requirements.txt")

setup(
    name=PACKAGE_NAME,
    version=PACKAGE_VERSION,
    description=SUMMARY,
    long_description=DESCRIPTION,
    author='worldveil',
    author_email='will.drevo@gmail.com',
    url='http://github.com/tuxdna/dejavu',
    license='Apache 2.0',
    include_package_data=True,
    packages=find_packages(),
    platforms=['Any'],
    install_requires=REQUIREMENTS,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
<commit_before>from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "dejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], ) <commit_msg>Update project name from dejavu to PyDejavu<commit_after>from setuptools import setup, find_packages # import os, sys def parse_requirements(requirements): # load from requirements.txt with open(requirements) as f: lines = [l for l in f] # remove spaces stripped = map((lambda x: x.strip()), lines) # remove comments nocomments = filter((lambda x: not x.startswith('#')), stripped) # remove empty lines reqs = filter((lambda x: x), nocomments) return reqs PACKAGE_NAME = "PyDejavu" PACKAGE_VERSION = "0.1" SUMMARY = 'Dejavu Audio Fingerprinting' DESCRIPTION = """Dejavu Audio Fingerprinting""" REQUIREMENTS = parse_requirements("requirements.txt") setup( name=PACKAGE_NAME, version=PACKAGE_VERSION, description=SUMMARY, long_description=DESCRIPTION, author='worldveil', author_email='will.drevo@gmail.com', url='http://github.com/tuxdna/dejavu', license='Apache 2.0', include_package_data=True, packages=find_packages(), platforms=['Any'], install_requires=REQUIREMENTS, classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Console', 'Intended Audience :: Developers', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', 'Topic :: Software Development :: Libraries :: Python Modules', ], )
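Only the PACKAGE_NAME constant changes between the two revisions; the parse_requirements() chain is untouched. A self-contained rerun of that chain (requirement names are illustrative; note that on Python 3 the final filter() is a lazy iterator, so wrapping it in list() is a common hardening):

lines = ["numpy>=1.8\n", "# comment\n", "\n", "pydub\n"]
stripped = map(lambda x: x.strip(), lines)
nocomments = filter(lambda x: not x.startswith('#'), stripped)
reqs = list(filter(lambda x: x, nocomments))  # list() so the result survives reuse
print(reqs)  # ['numpy>=1.8', 'pydub']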
bb79b0a66a41106db220b1a0fb0e3bb8db9b5072
utils.py
utils.py
from keras.preprocessing.sequence import pad_sequences
import numpy as np


def to_one_hot_sequences(x, nb_classes=None, seq_len=30, start=0, stop=None, step=0):
    if nb_classes is None:
        nb_classes = _guess_nb_classes(x)
    if stop is None:
        stop = len(x) - seq_len
    if step <= 0:
        step += seq_len
    seqs = [x[max(i, 0):i + seq_len] for i in range(start, stop, step)]
    seqs = pad_sequences(seqs, maxlen=seq_len, value=0)
    seqs = np.stack([to_one_hot_sequence(seq, nb_classes=nb_classes) for seq in seqs])
    return seqs


def to_one_hot_sequence(x, nb_classes=None):
    if nb_classes is None:
        nb_classes = _guess_nb_classes(x)
    seq = np.zeros((len(x), nb_classes), dtype=np.bool)
    for i, j in enumerate(x):
        if j > 0:
            seq[i, j - 1] = 1
    return seq


def _guess_nb_classes(x):
    return len(set(x) - {0})
Add an utility to transform an integer sequence into one hot sequence and sequences
Add an utility to transform an integer sequence into one hot sequence and sequences
Python
mit
xiongliyang219/physics_music
Add an utility to transform an integer sequence into one hot sequence and sequences
from keras.preprocessing.sequence import pad_sequences import numpy as np def to_one_hot_sequences(x, nb_classes=None, seq_len=30, start=0, stop=None, step=0): if nb_classes is None: nb_classes = _guess_nb_classes(x) if stop is None: stop = len(x) - seq_len if step <= 0: step += seq_len seqs = [x[max(i, 0):i + seq_len] for i in range(start, stop, step)] seqs = pad_sequences(seqs, maxlen=seq_len, value=0) seqs = np.stack([to_one_hot_sequence(seq, nb_classes=nb_classes) for seq in seqs]) return seqs def to_one_hot_sequence(x, nb_classes=None): if nb_classes is None: nb_classes = _guess_nb_classes(x) seq = np.zeros((len(x), nb_classes), dtype=np.bool) for i, j in enumerate(x): if j > 0: seq[i, j - 1] = 1 return seq def _guess_nb_classes(x): return len(set(x) - {0})
<commit_before><commit_msg>Add an utility to transform an integer sequence into one hot sequence and sequences<commit_after>
from keras.preprocessing.sequence import pad_sequences import numpy as np def to_one_hot_sequences(x, nb_classes=None, seq_len=30, start=0, stop=None, step=0): if nb_classes is None: nb_classes = _guess_nb_classes(x) if stop is None: stop = len(x) - seq_len if step <= 0: step += seq_len seqs = [x[max(i, 0):i + seq_len] for i in range(start, stop, step)] seqs = pad_sequences(seqs, maxlen=seq_len, value=0) seqs = np.stack([to_one_hot_sequence(seq, nb_classes=nb_classes) for seq in seqs]) return seqs def to_one_hot_sequence(x, nb_classes=None): if nb_classes is None: nb_classes = _guess_nb_classes(x) seq = np.zeros((len(x), nb_classes), dtype=np.bool) for i, j in enumerate(x): if j > 0: seq[i, j - 1] = 1 return seq def _guess_nb_classes(x): return len(set(x) - {0})
Add an utility to transform an integer sequence into one hot sequence and sequences
from keras.preprocessing.sequence import pad_sequences
import numpy as np


def to_one_hot_sequences(x, nb_classes=None, seq_len=30, start=0, stop=None, step=0):
    if nb_classes is None:
        nb_classes = _guess_nb_classes(x)
    if stop is None:
        stop = len(x) - seq_len
    if step <= 0:
        step += seq_len
    seqs = [x[max(i, 0):i + seq_len] for i in range(start, stop, step)]
    seqs = pad_sequences(seqs, maxlen=seq_len, value=0)
    seqs = np.stack([to_one_hot_sequence(seq, nb_classes=nb_classes) for seq in seqs])
    return seqs


def to_one_hot_sequence(x, nb_classes=None):
    if nb_classes is None:
        nb_classes = _guess_nb_classes(x)
    seq = np.zeros((len(x), nb_classes), dtype=np.bool)
    for i, j in enumerate(x):
        if j > 0:
            seq[i, j - 1] = 1
    return seq


def _guess_nb_classes(x):
    return len(set(x) - {0})
<commit_before><commit_msg>Add an utility to transform an integer sequence into one hot sequence and sequences<commit_after>from keras.preprocessing.sequence import pad_sequences import numpy as np def to_one_hot_sequences(x, nb_classes=None, seq_len=30, start=0, stop=None, step=0): if nb_classes is None: nb_classes = _guess_nb_classes(x) if stop is None: stop = len(x) - seq_len if step <= 0: step += seq_len seqs = [x[max(i, 0):i + seq_len] for i in range(start, stop, step)] seqs = pad_sequences(seqs, maxlen=seq_len, value=0) seqs = np.stack([to_one_hot_sequence(seq, nb_classes=nb_classes) for seq in seqs]) return seqs def to_one_hot_sequence(x, nb_classes=None): if nb_classes is None: nb_classes = _guess_nb_classes(x) seq = np.zeros((len(x), nb_classes), dtype=np.bool) for i, j in enumerate(x): if j > 0: seq[i, j - 1] = 1 return seq def _guess_nb_classes(x): return len(set(x) - {0})
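A quick demonstration of the record's encoding convention (0 is padding, classes are 1-indexed), rewritten without the keras dependency and with the plain bool dtype in place of the deprecated np.bool:

import numpy as np

x = [2, 0, 1]            # 0 = padding; class labels start at 1
nb_classes = 2
seq = np.zeros((len(x), nb_classes), dtype=bool)
for i, j in enumerate(x):
    if j > 0:
        seq[i, j - 1] = 1
print(seq.astype(int))
# [[0 1]
#  [0 0]
#  [1 0]]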
a61a9ab1182eedefcf594f1966272d04f66f41da
webcomictocbz/tests/test_search.py
webcomictocbz/tests/test_search.py
from webcomictocbz.search import search
from webcomictocbz.comic import Comic


def test_search_searchable_website():
    searchable_website = search("https://xkcd.com/1/")
    assert searchable_website.url == "https://xkcd.com/1/"
    assert searchable_website.next_page_selector == "//*[@*[contains(., '{}')]]//@href".format("next")
    assert searchable_website.comic_image_selector == "//*[@*[contains(., '{}')]]//@src".format("comic")


def test_search_unsearchable_website():
    unsearchable_website = search("https://j-cpelletier.github.io/WebComicToCBZ/1.html")
    assert unsearchable_website == None
Add tests for search function
Add tests for search function
Python
mit
J-CPelletier/webcomix,J-CPelletier/WebComicToCBZ,J-CPelletier/webcomix
Add tests for search function
from webcomictocbz.search import search
from webcomictocbz.comic import Comic


def test_search_searchable_website():
    searchable_website = search("https://xkcd.com/1/")
    assert searchable_website.url == "https://xkcd.com/1/"
    assert searchable_website.next_page_selector == "//*[@*[contains(., '{}')]]//@href".format("next")
    assert searchable_website.comic_image_selector == "//*[@*[contains(., '{}')]]//@src".format("comic")


def test_search_unsearchable_website():
    unsearchable_website = search("https://j-cpelletier.github.io/WebComicToCBZ/1.html")
    assert unsearchable_website == None
<commit_before><commit_msg>Add tests for search function<commit_after>
from webcomictocbz.search import search
from webcomictocbz.comic import Comic


def test_search_searchable_website():
    searchable_website = search("https://xkcd.com/1/")
    assert searchable_website.url == "https://xkcd.com/1/"
    assert searchable_website.next_page_selector == "//*[@*[contains(., '{}')]]//@href".format("next")
    assert searchable_website.comic_image_selector == "//*[@*[contains(., '{}')]]//@src".format("comic")


def test_search_unsearchable_website():
    unsearchable_website = search("https://j-cpelletier.github.io/WebComicToCBZ/1.html")
    assert unsearchable_website == None
Add tests for search function
from webcomictocbz.search import search
from webcomictocbz.comic import Comic


def test_search_searchable_website():
    searchable_website = search("https://xkcd.com/1/")
    assert searchable_website.url == "https://xkcd.com/1/"
    assert searchable_website.next_page_selector == "//*[@*[contains(., '{}')]]//@href".format("next")
    assert searchable_website.comic_image_selector == "//*[@*[contains(., '{}')]]//@src".format("comic")


def test_search_unsearchable_website():
    unsearchable_website = search("https://j-cpelletier.github.io/WebComicToCBZ/1.html")
    assert unsearchable_website == None
<commit_before><commit_msg>Add tests for search function<commit_after>
from webcomictocbz.search import search
from webcomictocbz.comic import Comic


def test_search_searchable_website():
    searchable_website = search("https://xkcd.com/1/")
    assert searchable_website.url == "https://xkcd.com/1/"
    assert searchable_website.next_page_selector == "//*[@*[contains(., '{}')]]//@href".format("next")
    assert searchable_website.comic_image_selector == "//*[@*[contains(., '{}')]]//@src".format("comic")


def test_search_unsearchable_website():
    unsearchable_website = search("https://j-cpelletier.github.io/WebComicToCBZ/1.html")
    assert unsearchable_website == None
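As a side note on the selectors asserted above, the format() templates expand to attribute-matching XPath strings; a minimal standalone illustration (not taken from the webcomix code):

template = "//*[@*[contains(., '{}')]]//@href"
print(template.format("next"))
# //*[@*[contains(., 'next')]]//@href : selects hrefs beneath any element
# that has some attribute whose value mentions "next"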
770e732d76f5e4f4dbb9f2d2d87c3b49440d906b
tests/unit/doc_test.py
tests/unit/doc_test.py
# -*- coding: utf-8 -*-
'''
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
'''

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

# Import Salt libs
import salt.modules.cmdmod

ensure_in_syspath('../')


class DocTestCase(TestCase):
    '''
    Unit test case for testing doc files and strings.
    '''

    def test_check_for_doc_inline_markup(self):
        '''
        We should not be using the ``:doc:`` inline markup option when
        cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
        This test checks for reference to ``:doc:`` usage.

        See Issue #12788 for more information.
        https://github.com/saltstack/salt/issues/12788
        '''
        salt_dir = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 2)[0]
        salt_dir += '/'
        cmd = 'grep -r :doc: ' + salt_dir

        grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')

        test_ret = {}
        for line in grep_call:
            # Skip any .pyc files that may be present
            if line.startswith('Binary'):
                continue

            key, val = line.split(':', 1)

            # Don't test man pages or this file
            if 'man' in key or key.endswith('doc_test.py'):
                continue

            # Don't test the page that documents to not use ":doc:"
            if key.endswith('/conventions/documentation.rst'):
                continue

            # Set up test return dict
            if test_ret.get(key) is None:
                test_ret[key] = [val.lstrip()]
            else:
                test_ret[key].append(val.lstrip())

        # Allow test results to show files with :doc: ref, rather than truncating
        self.maxDiff = None

        # test_ret should be empty, otherwise there are :doc: references present
        self.assertEqual(test_ret, {})


if __name__ == '__main__':
    from integration import run_tests
    run_tests(DocTestCase, needs_daemon=False)
Add a unit test to search for new doc markup refs
Add a unit test to search for new doc markup refs
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
Add a unit test to search for new doc markup refs
# -*- coding: utf-8 -*-
'''
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
'''

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

# Import Salt libs
import salt.modules.cmdmod

ensure_in_syspath('../')


class DocTestCase(TestCase):
    '''
    Unit test case for testing doc files and strings.
    '''

    def test_check_for_doc_inline_markup(self):
        '''
        We should not be using the ``:doc:`` inline markup option when
        cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
        This test checks for reference to ``:doc:`` usage.

        See Issue #12788 for more information.
        https://github.com/saltstack/salt/issues/12788
        '''
        salt_dir = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 2)[0]
        salt_dir += '/'
        cmd = 'grep -r :doc: ' + salt_dir

        grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')

        test_ret = {}
        for line in grep_call:
            # Skip any .pyc files that may be present
            if line.startswith('Binary'):
                continue

            key, val = line.split(':', 1)

            # Don't test man pages or this file
            if 'man' in key or key.endswith('doc_test.py'):
                continue

            # Don't test the page that documents to not use ":doc:"
            if key.endswith('/conventions/documentation.rst'):
                continue

            # Set up test return dict
            if test_ret.get(key) is None:
                test_ret[key] = [val.lstrip()]
            else:
                test_ret[key].append(val.lstrip())

        # Allow test results to show files with :doc: ref, rather than truncating
        self.maxDiff = None

        # test_ret should be empty, otherwise there are :doc: references present
        self.assertEqual(test_ret, {})


if __name__ == '__main__':
    from integration import run_tests
    run_tests(DocTestCase, needs_daemon=False)
<commit_before><commit_msg>Add a unit test to search for new doc markup refs<commit_after>
# -*- coding: utf-8 -*-
'''
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
'''

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

# Import Salt libs
import salt.modules.cmdmod

ensure_in_syspath('../')


class DocTestCase(TestCase):
    '''
    Unit test case for testing doc files and strings.
    '''

    def test_check_for_doc_inline_markup(self):
        '''
        We should not be using the ``:doc:`` inline markup option when
        cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
        This test checks for reference to ``:doc:`` usage.

        See Issue #12788 for more information.
        https://github.com/saltstack/salt/issues/12788
        '''
        salt_dir = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 2)[0]
        salt_dir += '/'
        cmd = 'grep -r :doc: ' + salt_dir

        grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')

        test_ret = {}
        for line in grep_call:
            # Skip any .pyc files that may be present
            if line.startswith('Binary'):
                continue

            key, val = line.split(':', 1)

            # Don't test man pages or this file
            if 'man' in key or key.endswith('doc_test.py'):
                continue

            # Don't test the page that documents to not use ":doc:"
            if key.endswith('/conventions/documentation.rst'):
                continue

            # Set up test return dict
            if test_ret.get(key) is None:
                test_ret[key] = [val.lstrip()]
            else:
                test_ret[key].append(val.lstrip())

        # Allow test results to show files with :doc: ref, rather than truncating
        self.maxDiff = None

        # test_ret should be empty, otherwise there are :doc: references present
        self.assertEqual(test_ret, {})


if __name__ == '__main__':
    from integration import run_tests
    run_tests(DocTestCase, needs_daemon=False)
Add a unit test to search for new doc markup refs
# -*- coding: utf-8 -*-
'''
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
'''

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

# Import Salt libs
import salt.modules.cmdmod

ensure_in_syspath('../')


class DocTestCase(TestCase):
    '''
    Unit test case for testing doc files and strings.
    '''

    def test_check_for_doc_inline_markup(self):
        '''
        We should not be using the ``:doc:`` inline markup option when
        cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
        This test checks for reference to ``:doc:`` usage.

        See Issue #12788 for more information.
        https://github.com/saltstack/salt/issues/12788
        '''
        salt_dir = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 2)[0]
        salt_dir += '/'
        cmd = 'grep -r :doc: ' + salt_dir

        grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')

        test_ret = {}
        for line in grep_call:
            # Skip any .pyc files that may be present
            if line.startswith('Binary'):
                continue

            key, val = line.split(':', 1)

            # Don't test man pages or this file
            if 'man' in key or key.endswith('doc_test.py'):
                continue

            # Don't test the page that documents to not use ":doc:"
            if key.endswith('/conventions/documentation.rst'):
                continue

            # Set up test return dict
            if test_ret.get(key) is None:
                test_ret[key] = [val.lstrip()]
            else:
                test_ret[key].append(val.lstrip())

        # Allow test results to show files with :doc: ref, rather than truncating
        self.maxDiff = None

        # test_ret should be empty, otherwise there are :doc: references present
        self.assertEqual(test_ret, {})


if __name__ == '__main__':
    from integration import run_tests
    run_tests(DocTestCase, needs_daemon=False)
<commit_before><commit_msg>Add a unit test to search for new doc markup refs<commit_after>
# -*- coding: utf-8 -*-
'''
    tests.unit.doc_test
    ~~~~~~~~~~~~~~~~~~~~
'''

# Import Python libs
from __future__ import absolute_import
import os

# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath

# Import Salt libs
import salt.modules.cmdmod

ensure_in_syspath('../')


class DocTestCase(TestCase):
    '''
    Unit test case for testing doc files and strings.
    '''

    def test_check_for_doc_inline_markup(self):
        '''
        We should not be using the ``:doc:`` inline markup option when
        cross-referencing locations. Use ``:ref:`` or ``:mod:`` instead.
        This test checks for reference to ``:doc:`` usage.

        See Issue #12788 for more information.
        https://github.com/saltstack/salt/issues/12788
        '''
        salt_dir = os.path.dirname(os.path.realpath(__file__)).rsplit('/', 2)[0]
        salt_dir += '/'
        cmd = 'grep -r :doc: ' + salt_dir

        grep_call = salt.modules.cmdmod.run_stdout(cmd=cmd).split('\n')

        test_ret = {}
        for line in grep_call:
            # Skip any .pyc files that may be present
            if line.startswith('Binary'):
                continue

            key, val = line.split(':', 1)

            # Don't test man pages or this file
            if 'man' in key or key.endswith('doc_test.py'):
                continue

            # Don't test the page that documents to not use ":doc:"
            if key.endswith('/conventions/documentation.rst'):
                continue

            # Set up test return dict
            if test_ret.get(key) is None:
                test_ret[key] = [val.lstrip()]
            else:
                test_ret[key].append(val.lstrip())

        # Allow test results to show files with :doc: ref, rather than truncating
        self.maxDiff = None

        # test_ret should be empty, otherwise there are :doc: references present
        self.assertEqual(test_ret, {})


if __name__ == '__main__':
    from integration import run_tests
    run_tests(DocTestCase, needs_daemon=False)
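The test above shells out to grep; a rough pure-Python equivalent of the same scan, shown only as a sketch (this is not how Salt implements it):

import os

def find_doc_refs(root):
    # Map each file under root to the stripped lines that contain ':doc:'.
    hits = {}
    for dirpath, _, filenames in os.walk(root):
        for name in filenames:
            path = os.path.join(dirpath, name)
            try:
                with open(path) as handle:
                    matches = [line.strip() for line in handle if ':doc:' in line]
            except (OSError, UnicodeDecodeError):
                continue  # unreadable or binary file, like grep's 'Binary' lines
            if matches:
                hits[path] = matches
    return hits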
8d548b31905b79f324bffff43b876b96d95a3525
cli.py
cli.py
#!/usr/bin/env python

import argparse
import requests
import sys

URL = 'https://r1d2.herokuapp.com'


def get_json(url):
    return requests.get(url).json()


def get_menu(json):
    return json['menu']


def format_item(name, price):
    return '{} (CHF {:.2f})'.format(name, price)


def print_menu(menu, day=None):
    if day is not None:
        print('--- {} ---'.format(day))
    for name, price in menu:
        print(format_item(name, price))


def fetch_today():
    return get_menu(get_json(URL))


def fetch_week():
    return get_menu(get_json(URL + '/week'))


def show_today():
    print_menu(fetch_today())


def show_day(day):
    day = day.lower()
    j = get_json(URL + '/{}'.format(day))
    if 'error' in j:
        print('Could not fetch menu for day = {}'.format(day))
    print_menu(get_menu(j), day=day.capitalize())


def show_week():
    days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    menus = fetch_week()
    for day, menu in enumerate(menus):
        print_menu(menu, days[day])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fetch menu of R1 at CERN')
    parser.add_argument(
        '--day', help='Get menu for specific day (Monday through Friday).')
    parser.add_argument('--week', action='store_true',
                        help='Get menu for the whole week.')
    args = parser.parse_args()
    if args.week:
        show_week()
    elif args.day is not None:
        show_day(args.day)
    else:
        show_today()
Add example CLI program that uses the API.
Add example CLI program that uses the API.
Python
mit
kdungs/R1D2
Add example CLI program that uses the API.
#!/usr/bin/env python

import argparse
import requests
import sys

URL = 'https://r1d2.herokuapp.com'


def get_json(url):
    return requests.get(url).json()


def get_menu(json):
    return json['menu']


def format_item(name, price):
    return '{} (CHF {:.2f})'.format(name, price)


def print_menu(menu, day=None):
    if day is not None:
        print('--- {} ---'.format(day))
    for name, price in menu:
        print(format_item(name, price))


def fetch_today():
    return get_menu(get_json(URL))


def fetch_week():
    return get_menu(get_json(URL + '/week'))


def show_today():
    print_menu(fetch_today())


def show_day(day):
    day = day.lower()
    j = get_json(URL + '/{}'.format(day))
    if 'error' in j:
        print('Could not fetch menu for day = {}'.format(day))
    print_menu(get_menu(j), day=day.capitalize())


def show_week():
    days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    menus = fetch_week()
    for day, menu in enumerate(menus):
        print_menu(menu, days[day])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fetch menu of R1 at CERN')
    parser.add_argument(
        '--day', help='Get menu for specific day (Monday through Friday).')
    parser.add_argument('--week', action='store_true',
                        help='Get menu for the whole week.')
    args = parser.parse_args()
    if args.week:
        show_week()
    elif args.day is not None:
        show_day(args.day)
    else:
        show_today()
<commit_before><commit_msg>Add example CLI program that uses the API.<commit_after>
#!/usr/bin/env python

import argparse
import requests
import sys

URL = 'https://r1d2.herokuapp.com'


def get_json(url):
    return requests.get(url).json()


def get_menu(json):
    return json['menu']


def format_item(name, price):
    return '{} (CHF {:.2f})'.format(name, price)


def print_menu(menu, day=None):
    if day is not None:
        print('--- {} ---'.format(day))
    for name, price in menu:
        print(format_item(name, price))


def fetch_today():
    return get_menu(get_json(URL))


def fetch_week():
    return get_menu(get_json(URL + '/week'))


def show_today():
    print_menu(fetch_today())


def show_day(day):
    day = day.lower()
    j = get_json(URL + '/{}'.format(day))
    if 'error' in j:
        print('Could not fetch menu for day = {}'.format(day))
    print_menu(get_menu(j), day=day.capitalize())


def show_week():
    days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    menus = fetch_week()
    for day, menu in enumerate(menus):
        print_menu(menu, days[day])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fetch menu of R1 at CERN')
    parser.add_argument(
        '--day', help='Get menu for specific day (Monday through Friday).')
    parser.add_argument('--week', action='store_true',
                        help='Get menu for the whole week.')
    args = parser.parse_args()
    if args.week:
        show_week()
    elif args.day is not None:
        show_day(args.day)
    else:
        show_today()
Add example CLI program that uses the API.
#!/usr/bin/env python

import argparse
import requests
import sys

URL = 'https://r1d2.herokuapp.com'


def get_json(url):
    return requests.get(url).json()


def get_menu(json):
    return json['menu']


def format_item(name, price):
    return '{} (CHF {:.2f})'.format(name, price)


def print_menu(menu, day=None):
    if day is not None:
        print('--- {} ---'.format(day))
    for name, price in menu:
        print(format_item(name, price))


def fetch_today():
    return get_menu(get_json(URL))


def fetch_week():
    return get_menu(get_json(URL + '/week'))


def show_today():
    print_menu(fetch_today())


def show_day(day):
    day = day.lower()
    j = get_json(URL + '/{}'.format(day))
    if 'error' in j:
        print('Could not fetch menu for day = {}'.format(day))
    print_menu(get_menu(j), day=day.capitalize())


def show_week():
    days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    menus = fetch_week()
    for day, menu in enumerate(menus):
        print_menu(menu, days[day])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fetch menu of R1 at CERN')
    parser.add_argument(
        '--day', help='Get menu for specific day (Monday through Friday).')
    parser.add_argument('--week', action='store_true',
                        help='Get menu for the whole week.')
    args = parser.parse_args()
    if args.week:
        show_week()
    elif args.day is not None:
        show_day(args.day)
    else:
        show_today()
<commit_before><commit_msg>Add example CLI program that uses the API.<commit_after>
#!/usr/bin/env python

import argparse
import requests
import sys

URL = 'https://r1d2.herokuapp.com'


def get_json(url):
    return requests.get(url).json()


def get_menu(json):
    return json['menu']


def format_item(name, price):
    return '{} (CHF {:.2f})'.format(name, price)


def print_menu(menu, day=None):
    if day is not None:
        print('--- {} ---'.format(day))
    for name, price in menu:
        print(format_item(name, price))


def fetch_today():
    return get_menu(get_json(URL))


def fetch_week():
    return get_menu(get_json(URL + '/week'))


def show_today():
    print_menu(fetch_today())


def show_day(day):
    day = day.lower()
    j = get_json(URL + '/{}'.format(day))
    if 'error' in j:
        print('Could not fetch menu for day = {}'.format(day))
    print_menu(get_menu(j), day=day.capitalize())


def show_week():
    days = ('Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday')
    menus = fetch_week()
    for day, menu in enumerate(menus):
        print_menu(menu, days[day])


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Fetch menu of R1 at CERN')
    parser.add_argument(
        '--day', help='Get menu for specific day (Monday through Friday).')
    parser.add_argument('--week', action='store_true',
                        help='Get menu for the whole week.')
    args = parser.parse_args()
    if args.week:
        show_week()
    elif args.day is not None:
        show_day(args.day)
    else:
        show_today()
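For reference, format_item and the flags above behave as follows (invocations illustrative, menu items invented):

print(format_item('Pizza Margherita', 11.5))   # Pizza Margherita (CHF 11.50)
# python cli.py                # today's menu
# python cli.py --day monday   # one specific day
# python cli.py --week         # the whole week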
83a5e3ea71a263718c9c5e43936e7426629e4e85
src/mmw/apps/modeling/migrations/0039_override_sedaadjust_for_old_scenarios.py
src/mmw/apps/modeling/migrations/0039_override_sedaadjust_for_old_scenarios.py
# Generated by Django 3.2.10 on 2021-12-27 19:26
import json

from django.db import migrations


def override_sedaadjust_for_old_projects(apps, schema_editor):
    """
    The default value of SedAAdjust is being changed from 1.5 to 1.25 for
    all new projects, which will use the high resolution "nhdhr" stream
    data. For older projects using the medium resolution "nhd" data, we
    override the value to be 1.5, so they remain consistent with old data,
    unless they were overridden by a user.
    """
    db_alias = schema_editor.connection.alias

    Project = apps.get_model('modeling', 'Project')
    ps = Project.objects.filter(layer_overrides__contains={'__STREAMS__':'nhd'})

    for p in ps:
        for s in p.scenarios.all():
            mods = json.loads(s.modifications)
            m_other = next((m for m in mods if m['modKey'] == 'entry_other'), None)
            if m_other:
                if 'SedAAdjust' not in m_other['output']:
                    m_other['output']['SedAAdjust'] = 1.5
                    m_other['userInput']['SedAAdjust'] = 1.5
                    s.modifications = json.dumps(mods)
                    s.save()
            else:
                mods.append({
                    'modKey': 'entry_other',
                    'output': {'SedAAdjust': 1.5},
                    'userInput': {'SedAAdjust': 1.5}})
                s.modifications = json.dumps(mods)
                s.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0038_alter_project_layer_overrides'),
    ]

    operations = [
        migrations.RunPython(override_sedaadjust_for_old_projects),
    ]
Use old SedAAdjust in all existing NHD projects
Use old SedAAdjust in all existing NHD projects

The new SedAAdjust value applies to those projects using the NHD Hi Res dataset. For older projects using NHD Medium Res, the old value is more appropriate. This migration sets that value for all old projects, unless it had already been overridden by a user.
Python
apache-2.0
WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed
Use old SedAAdjust in all existing NHD projects

The new SedAAdjust value applies to those projects using the NHD Hi Res dataset. For older projects using NHD Medium Res, the old value is more appropriate. This migration sets that value for all old projects, unless it had already been overridden by a user.
# Generated by Django 3.2.10 on 2021-12-27 19:26
import json

from django.db import migrations


def override_sedaadjust_for_old_projects(apps, schema_editor):
    """
    The default value of SedAAdjust is being changed from 1.5 to 1.25 for
    all new projects, which will use the high resolution "nhdhr" stream
    data. For older projects using the medium resolution "nhd" data, we
    override the value to be 1.5, so they remain consistent with old data,
    unless they were overridden by a user.
    """
    db_alias = schema_editor.connection.alias

    Project = apps.get_model('modeling', 'Project')
    ps = Project.objects.filter(layer_overrides__contains={'__STREAMS__':'nhd'})

    for p in ps:
        for s in p.scenarios.all():
            mods = json.loads(s.modifications)
            m_other = next((m for m in mods if m['modKey'] == 'entry_other'), None)
            if m_other:
                if 'SedAAdjust' not in m_other['output']:
                    m_other['output']['SedAAdjust'] = 1.5
                    m_other['userInput']['SedAAdjust'] = 1.5
                    s.modifications = json.dumps(mods)
                    s.save()
            else:
                mods.append({
                    'modKey': 'entry_other',
                    'output': {'SedAAdjust': 1.5},
                    'userInput': {'SedAAdjust': 1.5}})
                s.modifications = json.dumps(mods)
                s.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0038_alter_project_layer_overrides'),
    ]

    operations = [
        migrations.RunPython(override_sedaadjust_for_old_projects),
    ]
<commit_before><commit_msg>Use old SedAAdjust in all existing NHD projects

The new SedAAdjust value applies to those projects using the NHD Hi Res dataset. For older projects using NHD Medium Res, the old value is more appropriate. This migration sets that value for all old projects, unless it had already been overridden by a user.<commit_after>
# Generated by Django 3.2.10 on 2021-12-27 19:26
import json

from django.db import migrations


def override_sedaadjust_for_old_projects(apps, schema_editor):
    """
    The default value of SedAAdjust is being changed from 1.5 to 1.25 for
    all new projects, which will use the high resolution "nhdhr" stream
    data. For older projects using the medium resolution "nhd" data, we
    override the value to be 1.5, so they remain consistent with old data,
    unless they were overridden by a user.
    """
    db_alias = schema_editor.connection.alias

    Project = apps.get_model('modeling', 'Project')
    ps = Project.objects.filter(layer_overrides__contains={'__STREAMS__':'nhd'})

    for p in ps:
        for s in p.scenarios.all():
            mods = json.loads(s.modifications)
            m_other = next((m for m in mods if m['modKey'] == 'entry_other'), None)
            if m_other:
                if 'SedAAdjust' not in m_other['output']:
                    m_other['output']['SedAAdjust'] = 1.5
                    m_other['userInput']['SedAAdjust'] = 1.5
                    s.modifications = json.dumps(mods)
                    s.save()
            else:
                mods.append({
                    'modKey': 'entry_other',
                    'output': {'SedAAdjust': 1.5},
                    'userInput': {'SedAAdjust': 1.5}})
                s.modifications = json.dumps(mods)
                s.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0038_alter_project_layer_overrides'),
    ]

    operations = [
        migrations.RunPython(override_sedaadjust_for_old_projects),
    ]
Use old SedAAdjust in all existing NHD projects

The new SedAAdjust value applies to those projects using the NHD Hi Res dataset. For older projects using NHD Medium Res, the old value is more appropriate. This migration sets that value for all old projects, unless it had already been overridden by a user.
# Generated by Django 3.2.10 on 2021-12-27 19:26
import json

from django.db import migrations


def override_sedaadjust_for_old_projects(apps, schema_editor):
    """
    The default value of SedAAdjust is being changed from 1.5 to 1.25 for
    all new projects, which will use the high resolution "nhdhr" stream
    data. For older projects using the medium resolution "nhd" data, we
    override the value to be 1.5, so they remain consistent with old data,
    unless they were overridden by a user.
    """
    db_alias = schema_editor.connection.alias

    Project = apps.get_model('modeling', 'Project')
    ps = Project.objects.filter(layer_overrides__contains={'__STREAMS__':'nhd'})

    for p in ps:
        for s in p.scenarios.all():
            mods = json.loads(s.modifications)
            m_other = next((m for m in mods if m['modKey'] == 'entry_other'), None)
            if m_other:
                if 'SedAAdjust' not in m_other['output']:
                    m_other['output']['SedAAdjust'] = 1.5
                    m_other['userInput']['SedAAdjust'] = 1.5
                    s.modifications = json.dumps(mods)
                    s.save()
            else:
                mods.append({
                    'modKey': 'entry_other',
                    'output': {'SedAAdjust': 1.5},
                    'userInput': {'SedAAdjust': 1.5}})
                s.modifications = json.dumps(mods)
                s.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0038_alter_project_layer_overrides'),
    ]

    operations = [
        migrations.RunPython(override_sedaadjust_for_old_projects),
    ]
<commit_before><commit_msg>Use old SedAAdjust in all existing NHD projects

The new SedAAdjust value applies to those projects using the NHD Hi Res dataset. For older projects using NHD Medium Res, the old value is more appropriate. This migration sets that value for all old projects, unless it had already been overridden by a user.<commit_after>
# Generated by Django 3.2.10 on 2021-12-27 19:26
import json

from django.db import migrations


def override_sedaadjust_for_old_projects(apps, schema_editor):
    """
    The default value of SedAAdjust is being changed from 1.5 to 1.25 for
    all new projects, which will use the high resolution "nhdhr" stream
    data. For older projects using the medium resolution "nhd" data, we
    override the value to be 1.5, so they remain consistent with old data,
    unless they were overridden by a user.
    """
    db_alias = schema_editor.connection.alias

    Project = apps.get_model('modeling', 'Project')
    ps = Project.objects.filter(layer_overrides__contains={'__STREAMS__':'nhd'})

    for p in ps:
        for s in p.scenarios.all():
            mods = json.loads(s.modifications)
            m_other = next((m for m in mods if m['modKey'] == 'entry_other'), None)
            if m_other:
                if 'SedAAdjust' not in m_other['output']:
                    m_other['output']['SedAAdjust'] = 1.5
                    m_other['userInput']['SedAAdjust'] = 1.5
                    s.modifications = json.dumps(mods)
                    s.save()
            else:
                mods.append({
                    'modKey': 'entry_other',
                    'output': {'SedAAdjust': 1.5},
                    'userInput': {'SedAAdjust': 1.5}})
                s.modifications = json.dumps(mods)
                s.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0038_alter_project_layer_overrides'),
    ]

    operations = [
        migrations.RunPython(override_sedaadjust_for_old_projects),
    ]
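To make the data shape concrete, this is the transformation the migration applies to a scenario's modifications JSON (sample values invented):

mods = [{'modKey': 'entry_landcover', 'output': {}, 'userInput': {}}]  # no 'entry_other' yet
mods.append({
    'modKey': 'entry_other',
    'output': {'SedAAdjust': 1.5},
    'userInput': {'SedAAdjust': 1.5}})  # appended by the migration; an existing
                                        # entry_other only gains SedAAdjust if the
                                        # user never set it themselves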
808d8deebc154822473316cea7838bab41c4daf1
CodeFights/secondRightmostZeroBit.py
CodeFights/secondRightmostZeroBit.py
#!/usr/local/bin/python
# Code Second Right-most Zero Bit (Core) Problem


def secondRightmostZeroBit(n):
    # print('Number: {}\nBinary: {}'.format(n, bin(n)))
    # Solution:
    # return (((((n + 1) | n) + 1) | n) - n)
    return [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0][1]


def main():
    tests = [
        [37, 8],
        [1073741824, 2],
        [83748, 2],
        [4, 2],
        [728782938, 4]
    ]
    for t in tests:
        res = secondRightmostZeroBit(t[0])
        if t[1] == res:
            print("PASSED: secondRightmostZeroBit({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: secondRightmostZeroBit({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
Solve Code Fights second rightmost zero bit problem
Solve Code Fights second rightmost zero bit problem
Python
mit
HKuz/Test_Code
Solve Code Fights second rightmost zero bit problem
#!/usr/local/bin/python
# Code Second Right-most Zero Bit (Core) Problem


def secondRightmostZeroBit(n):
    # print('Number: {}\nBinary: {}'.format(n, bin(n)))
    # Solution:
    # return (((((n + 1) | n) + 1) | n) - n)
    return [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0][1]


def main():
    tests = [
        [37, 8],
        [1073741824, 2],
        [83748, 2],
        [4, 2],
        [728782938, 4]
    ]
    for t in tests:
        res = secondRightmostZeroBit(t[0])
        if t[1] == res:
            print("PASSED: secondRightmostZeroBit({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: secondRightmostZeroBit({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Solve Code Fights second rightmost zero bit problem<commit_after>
#!/usr/local/bin/python
# Code Second Right-most Zero Bit (Core) Problem


def secondRightmostZeroBit(n):
    # print('Number: {}\nBinary: {}'.format(n, bin(n)))
    # Solution:
    # return (((((n + 1) | n) + 1) | n) - n)
    return [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0][1]


def main():
    tests = [
        [37, 8],
        [1073741824, 2],
        [83748, 2],
        [4, 2],
        [728782938, 4]
    ]
    for t in tests:
        res = secondRightmostZeroBit(t[0])
        if t[1] == res:
            print("PASSED: secondRightmostZeroBit({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: secondRightmostZeroBit({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
Solve Code Fights second rightmost zero bit problem
#!/usr/local/bin/python
# Code Second Right-most Zero Bit (Core) Problem


def secondRightmostZeroBit(n):
    # print('Number: {}\nBinary: {}'.format(n, bin(n)))
    # Solution:
    # return (((((n + 1) | n) + 1) | n) - n)
    return [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0][1]


def main():
    tests = [
        [37, 8],
        [1073741824, 2],
        [83748, 2],
        [4, 2],
        [728782938, 4]
    ]
    for t in tests:
        res = secondRightmostZeroBit(t[0])
        if t[1] == res:
            print("PASSED: secondRightmostZeroBit({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: secondRightmostZeroBit({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Solve Code Fights second rightmost zero bit problem<commit_after>
#!/usr/local/bin/python
# Code Second Right-most Zero Bit (Core) Problem


def secondRightmostZeroBit(n):
    # print('Number: {}\nBinary: {}'.format(n, bin(n)))
    # Solution:
    # return (((((n + 1) | n) + 1) | n) - n)
    return [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0][1]


def main():
    tests = [
        [37, 8],
        [1073741824, 2],
        [83748, 2],
        [4, 2],
        [728782938, 4]
    ]
    for t in tests:
        res = secondRightmostZeroBit(t[0])
        if t[1] == res:
            print("PASSED: secondRightmostZeroBit({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: secondRightmostZeroBit({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
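A worked example of the first test case above: 37 is 0b100101, whose zero bits, counted from the right, sit at positions 1, 3 and 4 (values 2, 8 and 16), so the second right-most zero bit is 8. The commented-out bit-trick line computes the same value:

n = 37                                   # 0b100101
zeros = [2**i for i in range(len(bin(n)[2:])) if (n >> i) % 2 == 0]
print(zeros)                             # [2, 8, 16]
print(zeros[1])                          # 8
print(((((n + 1) | n) + 1) | n) - n)     # 8, via pure bit twiddling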
4ad05d8e05c115ed5f881351752f7c25fd611bdd
test/test_basic.py
test/test_basic.py
from __future__ import nested_scopes
from twisted.internet import reactor
import unittest
from twistedsnmp.test import basetestcase
from twistedsnmp import tableretriever, agentproxy
from twistedsnmp.pysnmpproto import v2c,v1, error


class BasicProxyTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testBulkRequestCreate( self ):
        """Test that we can create bulk requests"""
        request = self.client.encode(
            [ '.1.3.6' ],
            self.client.community,
            next= True,
            bulk = True,
            maxRepetitions = 256,
        )


class TableRetrieverTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testCreation( self ):
        self.installMessageCounter()
        tr = tableretriever.TableRetriever(
            self.client, ['.1.3.6'],
        )


if __name__ == "__main__":
    unittest.main()
Test for very basic operations
Test for very basic operations
Python
bsd-3-clause
mmattice/TwistedSNMP
Test for very basic operations
from __future__ import nested_scopes
from twisted.internet import reactor
import unittest
from twistedsnmp.test import basetestcase
from twistedsnmp import tableretriever, agentproxy
from twistedsnmp.pysnmpproto import v2c,v1, error


class BasicProxyTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testBulkRequestCreate( self ):
        """Test that we can create bulk requests"""
        request = self.client.encode(
            [ '.1.3.6' ],
            self.client.community,
            next= True,
            bulk = True,
            maxRepetitions = 256,
        )


class TableRetrieverTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testCreation( self ):
        self.installMessageCounter()
        tr = tableretriever.TableRetriever(
            self.client, ['.1.3.6'],
        )


if __name__ == "__main__":
    unittest.main()
<commit_before><commit_msg>Test for very basic operations<commit_after>
from __future__ import nested_scopes
from twisted.internet import reactor
import unittest
from twistedsnmp.test import basetestcase
from twistedsnmp import tableretriever, agentproxy
from twistedsnmp.pysnmpproto import v2c,v1, error


class BasicProxyTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testBulkRequestCreate( self ):
        """Test that we can create bulk requests"""
        request = self.client.encode(
            [ '.1.3.6' ],
            self.client.community,
            next= True,
            bulk = True,
            maxRepetitions = 256,
        )


class TableRetrieverTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testCreation( self ):
        self.installMessageCounter()
        tr = tableretriever.TableRetriever(
            self.client, ['.1.3.6'],
        )


if __name__ == "__main__":
    unittest.main()
Test for very basic operations
from __future__ import nested_scopes
from twisted.internet import reactor
import unittest
from twistedsnmp.test import basetestcase
from twistedsnmp import tableretriever, agentproxy
from twistedsnmp.pysnmpproto import v2c,v1, error


class BasicProxyTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testBulkRequestCreate( self ):
        """Test that we can create bulk requests"""
        request = self.client.encode(
            [ '.1.3.6' ],
            self.client.community,
            next= True,
            bulk = True,
            maxRepetitions = 256,
        )


class TableRetrieverTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testCreation( self ):
        self.installMessageCounter()
        tr = tableretriever.TableRetriever(
            self.client, ['.1.3.6'],
        )


if __name__ == "__main__":
    unittest.main()
<commit_before><commit_msg>Test for very basic operations<commit_after>
from __future__ import nested_scopes
from twisted.internet import reactor
import unittest
from twistedsnmp.test import basetestcase
from twistedsnmp import tableretriever, agentproxy
from twistedsnmp.pysnmpproto import v2c,v1, error


class BasicProxyTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testBulkRequestCreate( self ):
        """Test that we can create bulk requests"""
        request = self.client.encode(
            [ '.1.3.6' ],
            self.client.community,
            next= True,
            bulk = True,
            maxRepetitions = 256,
        )


class TableRetrieverTests( basetestcase.BaseTestCase ):
    version = 'v2c'

    def testCreation( self ):
        self.installMessageCounter()
        tr = tableretriever.TableRetriever(
            self.client, ['.1.3.6'],
        )


if __name__ == "__main__":
    unittest.main()
d111aeb7270b68b0dc845f0a7e32a41e687920b9
test/test_board.py
test/test_board.py
import unittest
import board


class BoardTest(unittest.TestCase):
    def setUp(self):
        self.board_size = 9
        self.testBoard = board.Board(self.board_size)
        #self.testBoard.place_piece(2, 3)

    def test_create_board(self):
        self.assertEqual(self.board_size, self.testBoard.board_size)

    def test_is_on_board(self):
        self.fail('unit test test_is_on_board is not done')

    def test_is_piece_set(self):
        self.fail('unit test test_is_piece_set is not done')
        #self.assertRaises(Exception, self.testBoard.place_piece, 2, 3)


if __name__ == '__main__':
    unittest.main()
Add frame for unit test on board
Add frame for unit test on board
Python
mit
jonbrohauge/pySudokuSolver
Add frame for unit test on board
import unittest
import board


class BoardTest(unittest.TestCase):
    def setUp(self):
        self.board_size = 9
        self.testBoard = board.Board(self.board_size)
        #self.testBoard.place_piece(2, 3)

    def test_create_board(self):
        self.assertEqual(self.board_size, self.testBoard.board_size)

    def test_is_on_board(self):
        self.fail('unit test test_is_on_board is not done')

    def test_is_piece_set(self):
        self.fail('unit test test_is_piece_set is not done')
        #self.assertRaises(Exception, self.testBoard.place_piece, 2, 3)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Add frame for unit test on board<commit_after>
import unittest
import board


class BoardTest(unittest.TestCase):
    def setUp(self):
        self.board_size = 9
        self.testBoard = board.Board(self.board_size)
        #self.testBoard.place_piece(2, 3)

    def test_create_board(self):
        self.assertEqual(self.board_size, self.testBoard.board_size)

    def test_is_on_board(self):
        self.fail('unit test test_is_on_board is not done')

    def test_is_piece_set(self):
        self.fail('unit test test_is_piece_set is not done')
        #self.assertRaises(Exception, self.testBoard.place_piece, 2, 3)


if __name__ == '__main__':
    unittest.main()
Add frame for unit test on board
import unittest
import board


class BoardTest(unittest.TestCase):
    def setUp(self):
        self.board_size = 9
        self.testBoard = board.Board(self.board_size)
        #self.testBoard.place_piece(2, 3)

    def test_create_board(self):
        self.assertEqual(self.board_size, self.testBoard.board_size)

    def test_is_on_board(self):
        self.fail('unit test test_is_on_board is not done')

    def test_is_piece_set(self):
        self.fail('unit test test_is_piece_set is not done')
        #self.assertRaises(Exception, self.testBoard.place_piece, 2, 3)


if __name__ == '__main__':
    unittest.main()
<commit_before><commit_msg>Add frame for unit test on board<commit_after>
import unittest
import board


class BoardTest(unittest.TestCase):
    def setUp(self):
        self.board_size = 9
        self.testBoard = board.Board(self.board_size)
        #self.testBoard.place_piece(2, 3)

    def test_create_board(self):
        self.assertEqual(self.board_size, self.testBoard.board_size)

    def test_is_on_board(self):
        self.fail('unit test test_is_on_board is not done')

    def test_is_piece_set(self):
        self.fail('unit test test_is_piece_set is not done')
        #self.assertRaises(Exception, self.testBoard.place_piece, 2, 3)


if __name__ == '__main__':
    unittest.main()
4a99f17a80da11655bc97372572220f1f0bb0d15
tools/reformat.py
tools/reformat.py
#!/usr/bin/python3
# Searches the assets/bibs/ directory and reformat all json files

import json
import os

DIR = 'opacclient/opacapp/src/main/assets/bibs/'

for filename in os.listdir(DIR):
    f = os.path.join(DIR, filename)
    data = json.load(open(f))
    json.dump(data, open(f, 'w'), indent=4, sort_keys=True)
Add tool for re-indenting JSON files
Add tool for re-indenting JSON files
Python
mit
ruediger-w/opacclient,johan12345/opacclient,raphaelm/opacclient,raphaelm/opacclient,ruediger-w/opacclient,opacapp/opacclient,simon04/opacclient,ruediger-w/opacclient,simon04/opacclient,johan12345/opacclient,johan12345/opacclient,ruediger-w/opacclient,opacapp/opacclient,hurzl/opacclient,ruediger-w/opacclient,thesebas/opacclient,simon04/opacclient,johan12345/opacclient,hurzl/opacclient,thesebas/opacclient,opacapp/opacclient,johan12345/opacclient,geomcmaster/opacclient,opacapp/opacclient,opacapp/opacclient,raphaelm/opacclient,thesebas/opacclient,hurzl/opacclient,geomcmaster/opacclient,geomcmaster/opacclient
Add tool for re-indenting JSON files
#!/usr/bin/python3
# Searches the assets/bibs/ directory and reformat all json files

import json
import os

DIR = 'opacclient/opacapp/src/main/assets/bibs/'

for filename in os.listdir(DIR):
    f = os.path.join(DIR, filename)
    data = json.load(open(f))
    json.dump(data, open(f, 'w'), indent=4, sort_keys=True)
<commit_before><commit_msg>Add tool for re-indenting JSON files<commit_after>
#!/usr/bin/python3
# Searches the assets/bibs/ directory and reformat all json files

import json
import os

DIR = 'opacclient/opacapp/src/main/assets/bibs/'

for filename in os.listdir(DIR):
    f = os.path.join(DIR, filename)
    data = json.load(open(f))
    json.dump(data, open(f, 'w'), indent=4, sort_keys=True)
Add tool for re-indenting JSON files
#!/usr/bin/python3
# Searches the assets/bibs/ directory and reformat all json files

import json
import os

DIR = 'opacclient/opacapp/src/main/assets/bibs/'

for filename in os.listdir(DIR):
    f = os.path.join(DIR, filename)
    data = json.load(open(f))
    json.dump(data, open(f, 'w'), indent=4, sort_keys=True)
<commit_before><commit_msg>Add tool for re-indenting JSON files<commit_after>
#!/usr/bin/python3
# Searches the assets/bibs/ directory and reformat all json files

import json
import os

DIR = 'opacclient/opacapp/src/main/assets/bibs/'

for filename in os.listdir(DIR):
    f = os.path.join(DIR, filename)
    data = json.load(open(f))
    json.dump(data, open(f, 'w'), indent=4, sort_keys=True)
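For illustration, this is the normalization json.dump applies with those flags (sample data invented):

import json
print(json.dumps({'b': 1, 'a': {'y': 2, 'x': 3}}, indent=4, sort_keys=True))
# {
#     "a": {
#         "x": 3,
#         "y": 2
#     },
#     "b": 1
# }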
a65187bebdec6ec99a9b5e967e818948ffb70969
test_pcalg.py
test_pcalg.py
# -*- coding: utf-8 -*-

'''
Test suite for pcalg
'''

import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest

from pcalg import estimate_cpdag
from pcalg import estimate_skeleton


@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
    (ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
        0: (1, ),
        1: (),
        2: (3, 4),
        3: (1, 2),
        4: (1, 2),
    })),
    (ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
        0: (2, ),
        1: (2, 3),
        2: (),
        3: (),
        4: (3, ),
    })),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
    '''
    estimate_cpdag should reveal the answer
    '''
    (graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
                                         data_matrix=data_matrix,
                                         alpha=alpha)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    error_msg = 'True edges should be: %s' % (g_answer.edges(), )
    assert nx.is_isomorphic(graph, g_answer), error_msg


def test_fixed_edges():
    '''
    The fixed edges shall appear in the skeleton
    '''
    data_matrix = np.array(bin_data).reshape((5000, 5))
    (graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
                                         data_matrix=data_matrix,
                                         alpha=0.01)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    assert not graph.has_edge(1, 2)

    fixed_edges = nx.DiGraph()
    fixed_edges.add_nodes_from(range(5))
    fixed_edges.add_edge(1, 2)

    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=((1,2), ))
    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=nx.DiGraph({0: (1, )}))

    (graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
                                   data_matrix=data_matrix,
                                   alpha=0.01,
                                   fixed_edges=fixed_edges)
    assert graph.has_edge(1, 2), graph.edges
Add test to verify fixed_edges option
test: Add test to verify fixed_edges option

Signed-off-by: limjcst <a207579da1956e43e71ded4d0c18bba514713a1d@163.com>
Python
bsd-2-clause
keiichishima/pcalg
test: Add test to verify fixed_edges option

Signed-off-by: limjcst <a207579da1956e43e71ded4d0c18bba514713a1d@163.com>
# -*- coding: utf-8 -*-

'''
Test suite for pcalg
'''

import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest

from pcalg import estimate_cpdag
from pcalg import estimate_skeleton


@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
    (ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
        0: (1, ),
        1: (),
        2: (3, 4),
        3: (1, 2),
        4: (1, 2),
    })),
    (ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
        0: (2, ),
        1: (2, 3),
        2: (),
        3: (),
        4: (3, ),
    })),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
    '''
    estimate_cpdag should reveal the answer
    '''
    (graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
                                         data_matrix=data_matrix,
                                         alpha=alpha)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    error_msg = 'True edges should be: %s' % (g_answer.edges(), )
    assert nx.is_isomorphic(graph, g_answer), error_msg


def test_fixed_edges():
    '''
    The fixed edges shall appear in the skeleton
    '''
    data_matrix = np.array(bin_data).reshape((5000, 5))
    (graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
                                         data_matrix=data_matrix,
                                         alpha=0.01)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    assert not graph.has_edge(1, 2)

    fixed_edges = nx.DiGraph()
    fixed_edges.add_nodes_from(range(5))
    fixed_edges.add_edge(1, 2)

    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=((1,2), ))
    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=nx.DiGraph({0: (1, )}))

    (graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
                                   data_matrix=data_matrix,
                                   alpha=0.01,
                                   fixed_edges=fixed_edges)
    assert graph.has_edge(1, 2), graph.edges
<commit_before><commit_msg>test: Add test to verify fixed_edges option

Signed-off-by: limjcst <a207579da1956e43e71ded4d0c18bba514713a1d@163.com><commit_after>
# -*- coding: utf-8 -*-

'''
Test suite for pcalg
'''

import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest

from pcalg import estimate_cpdag
from pcalg import estimate_skeleton


@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
    (ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
        0: (1, ),
        1: (),
        2: (3, 4),
        3: (1, 2),
        4: (1, 2),
    })),
    (ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
        0: (2, ),
        1: (2, 3),
        2: (),
        3: (),
        4: (3, ),
    })),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
    '''
    estimate_cpdag should reveal the answer
    '''
    (graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
                                         data_matrix=data_matrix,
                                         alpha=alpha)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    error_msg = 'True edges should be: %s' % (g_answer.edges(), )
    assert nx.is_isomorphic(graph, g_answer), error_msg


def test_fixed_edges():
    '''
    The fixed edges shall appear in the skeleton
    '''
    data_matrix = np.array(bin_data).reshape((5000, 5))
    (graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
                                         data_matrix=data_matrix,
                                         alpha=0.01)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    assert not graph.has_edge(1, 2)

    fixed_edges = nx.DiGraph()
    fixed_edges.add_nodes_from(range(5))
    fixed_edges.add_edge(1, 2)

    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=((1,2), ))
    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=nx.DiGraph({0: (1, )}))

    (graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
                                   data_matrix=data_matrix,
                                   alpha=0.01,
                                   fixed_edges=fixed_edges)
    assert graph.has_edge(1, 2), graph.edges
test: Add test to verify fixed_edges option

Signed-off-by: limjcst <a207579da1956e43e71ded4d0c18bba514713a1d@163.com>
# -*- coding: utf-8 -*-

'''
Test suite for pcalg
'''

import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest

from pcalg import estimate_cpdag
from pcalg import estimate_skeleton


@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
    (ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
        0: (1, ),
        1: (),
        2: (3, 4),
        3: (1, 2),
        4: (1, 2),
    })),
    (ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
        0: (2, ),
        1: (2, 3),
        2: (),
        3: (),
        4: (3, ),
    })),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
    '''
    estimate_cpdag should reveal the answer
    '''
    (graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
                                         data_matrix=data_matrix,
                                         alpha=alpha)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    error_msg = 'True edges should be: %s' % (g_answer.edges(), )
    assert nx.is_isomorphic(graph, g_answer), error_msg


def test_fixed_edges():
    '''
    The fixed edges shall appear in the skeleton
    '''
    data_matrix = np.array(bin_data).reshape((5000, 5))
    (graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
                                         data_matrix=data_matrix,
                                         alpha=0.01)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    assert not graph.has_edge(1, 2)

    fixed_edges = nx.DiGraph()
    fixed_edges.add_nodes_from(range(5))
    fixed_edges.add_edge(1, 2)

    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=((1,2), ))
    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=nx.DiGraph({0: (1, )}))

    (graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
                                   data_matrix=data_matrix,
                                   alpha=0.01,
                                   fixed_edges=fixed_edges)
    assert graph.has_edge(1, 2), graph.edges
<commit_before><commit_msg>test: Add test to verify fixed_edges option

Signed-off-by: limjcst <a207579da1956e43e71ded4d0c18bba514713a1d@163.com><commit_after>
# -*- coding: utf-8 -*-

'''
Test suite for pcalg
'''

import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest

from pcalg import estimate_cpdag
from pcalg import estimate_skeleton


@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
    (ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
        0: (1, ),
        1: (),
        2: (3, 4),
        3: (1, 2),
        4: (1, 2),
    })),
    (ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
        0: (2, ),
        1: (2, 3),
        2: (),
        3: (),
        4: (3, ),
    })),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
    '''
    estimate_cpdag should reveal the answer
    '''
    (graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
                                         data_matrix=data_matrix,
                                         alpha=alpha)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    error_msg = 'True edges should be: %s' % (g_answer.edges(), )
    assert nx.is_isomorphic(graph, g_answer), error_msg


def test_fixed_edges():
    '''
    The fixed edges shall appear in the skeleton
    '''
    data_matrix = np.array(bin_data).reshape((5000, 5))
    (graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
                                         data_matrix=data_matrix,
                                         alpha=0.01)
    graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    assert not graph.has_edge(1, 2)

    fixed_edges = nx.DiGraph()
    fixed_edges.add_nodes_from(range(5))
    fixed_edges.add_edge(1, 2)

    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=((1,2), ))
    with pytest.raises(ValueError):
        _ = estimate_skeleton(indep_test_func=ci_test_bin,
                              data_matrix=data_matrix,
                              alpha=0.01,
                              fixed_edges=nx.DiGraph({0: (1, )}))

    (graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
                                   data_matrix=data_matrix,
                                   alpha=0.01,
                                   fixed_edges=fixed_edges)
    assert graph.has_edge(1, 2), graph.edges
ed08bb7cb943c6299c97d44b1c4bd0c53d581cfb
omftools/cli/enumize_translation.py
omftools/cli/enumize_translation.py
import argparse
import re

from omftools.pyshadowdive.language import LanguageFile

re_non_alphanumeric = re.compile(r'[\W]+')


def txt(text: str) -> str:
    o = text.replace(' ', '_')
    o = re.sub(re_non_alphanumeric, '', o)
    o = o.upper()
    return o


def generate_enum(in_file: str, out_file: str):
    language = LanguageFile.load_native(in_file)
    assert len(language.titles) == len(language.strings)
    pairs = [(title, text) for title, text in zip(language.titles, language.strings)]

    with open(out_file, "wb") as fd:
        fd.write("enum TRANSLATION {\n".encode())
        for index, pair in enumerate(pairs, start=1):
            title, text = pair
            title = txt(title) if title else 'NONE'
            text = txt(text) if text else f'NONE_{index}'
            fd.write(f"    TXT__{title}__{text[:24]} = {index};\n".encode())
        fd.write("};\n".encode())


def main():
    parser = argparse.ArgumentParser(description="Generate enum header for language file")
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("output_file", help="Output file")
    args = parser.parse_args()

    generate_enum(args.input_file, args.output_file)


if __name__ == "__main__":
    main()
Add tool for generating translations enum
Add tool for generating translations enum
Python
mit
omf2097/pyomftools,omf2097/pyomftools
Add tool for generating translations enum
import argparse
import re

from omftools.pyshadowdive.language import LanguageFile

re_non_alphanumeric = re.compile(r'[\W]+')


def txt(text: str) -> str:
    o = text.replace(' ', '_')
    o = re.sub(re_non_alphanumeric, '', o)
    o = o.upper()
    return o


def generate_enum(in_file: str, out_file: str):
    language = LanguageFile.load_native(in_file)
    assert len(language.titles) == len(language.strings)
    pairs = [(title, text) for title, text in zip(language.titles, language.strings)]

    with open(out_file, "wb") as fd:
        fd.write("enum TRANSLATION {\n".encode())
        for index, pair in enumerate(pairs, start=1):
            title, text = pair
            title = txt(title) if title else 'NONE'
            text = txt(text) if text else f'NONE_{index}'
            fd.write(f"    TXT__{title}__{text[:24]} = {index};\n".encode())
        fd.write("};\n".encode())


def main():
    parser = argparse.ArgumentParser(description="Generate enum header for language file")
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("output_file", help="Output file")
    args = parser.parse_args()

    generate_enum(args.input_file, args.output_file)


if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add tool for generating translations enum<commit_after>
import argparse
import re

from omftools.pyshadowdive.language import LanguageFile

re_non_alphanumeric = re.compile(r'[\W]+')


def txt(text: str) -> str:
    o = text.replace(' ', '_')
    o = re.sub(re_non_alphanumeric, '', o)
    o = o.upper()
    return o


def generate_enum(in_file: str, out_file: str):
    language = LanguageFile.load_native(in_file)
    assert len(language.titles) == len(language.strings)
    pairs = [(title, text) for title, text in zip(language.titles, language.strings)]

    with open(out_file, "wb") as fd:
        fd.write("enum TRANSLATION {\n".encode())
        for index, pair in enumerate(pairs, start=1):
            title, text = pair
            title = txt(title) if title else 'NONE'
            text = txt(text) if text else f'NONE_{index}'
            fd.write(f"    TXT__{title}__{text[:24]} = {index};\n".encode())
        fd.write("};\n".encode())


def main():
    parser = argparse.ArgumentParser(description="Generate enum header for language file")
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("output_file", help="Output file")
    args = parser.parse_args()

    generate_enum(args.input_file, args.output_file)


if __name__ == "__main__":
    main()
Add tool for generating translations enum
import argparse
import re

from omftools.pyshadowdive.language import LanguageFile

re_non_alphanumeric = re.compile(r'[\W]+')


def txt(text: str) -> str:
    o = text.replace(' ', '_')
    o = re.sub(re_non_alphanumeric, '', o)
    o = o.upper()
    return o


def generate_enum(in_file: str, out_file: str):
    language = LanguageFile.load_native(in_file)
    assert len(language.titles) == len(language.strings)
    pairs = [(title, text) for title, text in zip(language.titles, language.strings)]

    with open(out_file, "wb") as fd:
        fd.write("enum TRANSLATION {\n".encode())
        for index, pair in enumerate(pairs, start=1):
            title, text = pair
            title = txt(title) if title else 'NONE'
            text = txt(text) if text else f'NONE_{index}'
            fd.write(f"    TXT__{title}__{text[:24]} = {index};\n".encode())
        fd.write("};\n".encode())


def main():
    parser = argparse.ArgumentParser(description="Generate enum header for language file")
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("output_file", help="Output file")
    args = parser.parse_args()

    generate_enum(args.input_file, args.output_file)


if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add tool for generating translations enum<commit_after>
import argparse
import re

from omftools.pyshadowdive.language import LanguageFile

re_non_alphanumeric = re.compile(r'[\W]+')


def txt(text: str) -> str:
    o = text.replace(' ', '_')
    o = re.sub(re_non_alphanumeric, '', o)
    o = o.upper()
    return o


def generate_enum(in_file: str, out_file: str):
    language = LanguageFile.load_native(in_file)
    assert len(language.titles) == len(language.strings)
    pairs = [(title, text) for title, text in zip(language.titles, language.strings)]

    with open(out_file, "wb") as fd:
        fd.write("enum TRANSLATION {\n".encode())
        for index, pair in enumerate(pairs, start=1):
            title, text = pair
            title = txt(title) if title else 'NONE'
            text = txt(text) if text else f'NONE_{index}'
            fd.write(f"    TXT__{title}__{text[:24]} = {index};\n".encode())
        fd.write("};\n".encode())


def main():
    parser = argparse.ArgumentParser(description="Generate enum header for language file")
    parser.add_argument("input_file", help="Input file")
    parser.add_argument("output_file", help="Output file")
    args = parser.parse_args()

    generate_enum(args.input_file, args.output_file)


if __name__ == "__main__":
    main()
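To show what the txt() normalization above does to a sample string (input invented):

import re
re_non_alphanumeric = re.compile(r'[\W]+')

def txt(text):
    o = text.replace(' ', '_')               # keep word boundaries as underscores
    o = re.sub(re_non_alphanumeric, '', o)   # drop non-alphanumerics; '_' matches \w and survives
    return o.upper()

print(txt('Fire & Ice!'))                    # FIRE__ICE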
b7a6463313c46d5d4545b17d49a3eca2e58fa2d3
benchmark/mini_bench.py
benchmark/mini_bench.py
# -*- coding: utf-8 -*-

# Copyright 2015 moco_beta
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

setup = """
from janome.tokenizer import Tokenizer
t = Tokenizer()
# janome (蛇の目) は, Pure Python で書かれた, 辞書内包の形態素解析器です.
s = u'janome (\u86c7\u306e\u76ee) \u306f, Pure Python \u3067\u66f8\u304b\u308c\u305f, \u8f9e\u66f8\u5185\u5305\u306e\u5f62\u614b\u7d20\u89e3\u6790\u5668\u3067\u3059.'
"""

if __name__ == '__main__':
    import timeit, sys
    n = int(sys.argv[1]) if len(sys.argv) > 1 else 100
    print("** execute timeit() with number=%d **" % n)
    res = timeit.repeat(stmt='t.tokenize(s)', setup=setup, repeat=5, number=n)
    for i, x in enumerate(res):
        print("repeat %d: %f" % (i, x))
Add a tiny benchmark script.
Add a tiny benchmark script.
Python
apache-2.0
mocobeta/janome,mocobeta/janome,nakagami/janome,nakagami/janome
Add a tiny benchmark script.
# -*- coding: utf-8 -*- # Copyright 2015 moco_beta # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. setup = """ from janome.tokenizer import Tokenizer t = Tokenizer() # janome (蛇の目) は, Pure Python で書かれた, 辞書内包の形態素解析器です. s = u'janome (\u86c7\u306e\u76ee) \u306f, Pure Python \u3067\u66f8\u304b\u308c\u305f, \u8f9e\u66f8\u5185\u5305\u306e\u5f62\u614b\u7d20\u89e3\u6790\u5668\u3067\u3059.' """ if __name__ == '__main__': import timeit, sys n = int(sys.argv[1]) if len(sys.argv) > 1 else 100 print("** execute timeit() with number=%d **" % n) res = timeit.repeat(stmt='t.tokenize(s)', setup=setup, repeat=5, number=n) for i, x in enumerate(res): print("repeat %d: %f" % (i, x))
<commit_before><commit_msg>Add a tiny benchmark script.<commit_after>
# -*- coding: utf-8 -*- # Copyright 2015 moco_beta # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. setup = """ from janome.tokenizer import Tokenizer t = Tokenizer() # janome (蛇の目) は, Pure Python で書かれた, 辞書内包の形態素解析器です. s = u'janome (\u86c7\u306e\u76ee) \u306f, Pure Python \u3067\u66f8\u304b\u308c\u305f, \u8f9e\u66f8\u5185\u5305\u306e\u5f62\u614b\u7d20\u89e3\u6790\u5668\u3067\u3059.' """ if __name__ == '__main__': import timeit, sys n = int(sys.argv[1]) if len(sys.argv) > 1 else 100 print("** execute timeit() with number=%d **" % n) res = timeit.repeat(stmt='t.tokenize(s)', setup=setup, repeat=5, number=n) for i, x in enumerate(res): print("repeat %d: %f" % (i, x))
Add a tiny benchmark script.# -*- coding: utf-8 -*- # Copyright 2015 moco_beta # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. setup = """ from janome.tokenizer import Tokenizer t = Tokenizer() # janome (蛇の目) は, Pure Python で書かれた, 辞書内包の形態素解析器です. s = u'janome (\u86c7\u306e\u76ee) \u306f, Pure Python \u3067\u66f8\u304b\u308c\u305f, \u8f9e\u66f8\u5185\u5305\u306e\u5f62\u614b\u7d20\u89e3\u6790\u5668\u3067\u3059.' """ if __name__ == '__main__': import timeit, sys n = int(sys.argv[1]) if len(sys.argv) > 1 else 100 print("** execute timeit() with number=%d **" % n) res = timeit.repeat(stmt='t.tokenize(s)', setup=setup, repeat=5, number=n) for i, x in enumerate(res): print("repeat %d: %f" % (i, x))
<commit_before><commit_msg>Add a tiny benchmark script.<commit_after># -*- coding: utf-8 -*- # Copyright 2015 moco_beta # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. setup = """ from janome.tokenizer import Tokenizer t = Tokenizer() # janome (蛇の目) は, Pure Python で書かれた, 辞書内包の形態素解析器です. s = u'janome (\u86c7\u306e\u76ee) \u306f, Pure Python \u3067\u66f8\u304b\u308c\u305f, \u8f9e\u66f8\u5185\u5305\u306e\u5f62\u614b\u7d20\u89e3\u6790\u5668\u3067\u3059.' """ if __name__ == '__main__': import timeit, sys n = int(sys.argv[1]) if len(sys.argv) > 1 else 100 print("** execute timeit() with number=%d **" % n) res = timeit.repeat(stmt='t.tokenize(s)', setup=setup, repeat=5, number=n) for i, x in enumerate(res): print("repeat %d: %f" % (i, x))
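A note on reading the output of the benchmark above: timeit.repeat returns one cumulative wall-clock time per repeat, so the least noisy per-call estimate is the minimum repeat divided by the loop count. A minimal, hypothetical post-processing sketch for the res list it builds:

# Hypothetical follow-up to mini_bench.py: condense the five repeats
# into a single per-call estimate (the minimum is the least noisy sample).
best = min(res)
per_call = best / float(n)
print("best per-call time: %f sec" % per_call)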
c7895f0c799bc57e106c7e2f532af05609142f1b
helusers/management/commands/sync_helusers.py
helusers/management/commands/sync_helusers.py
from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from allauth.socialaccount.models import SocialApp from helusers.providers.helsinki.provider import HelsinkiProvider class Command(BaseCommand): help = 'Create or update helusers allauth SocialApp' def handle(self, *args, **options): changed = False try: app = SocialApp.objects.get(provider=HelsinkiProvider.id) except SocialApp.DoesNotExist: app = SocialApp(provider=HelsinkiProvider.id) self.stdout.write(self.style.SUCCESS('Creating new SocialApp')) if not app.name: app.name = 'Helsingin kaupungin työntekijät' changed = True client_id = secret_key = None jwt_settings = getattr(settings, 'JWT_AUTH') if jwt_settings: client_id = jwt_settings.get('JWT_AUDIENCE') secret_key = jwt_settings.get('JWT_SECRET_KEY') if not client_id: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID") if not secret_key: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key") if app.client_id != client_id: changed = True app.client_id = client_id if app.secret != secret_key: changed = True app.secret = secret_key if changed: app.save() if not app.sites.exists(): app.sites.add(Site.objects.get(id=settings.SITE_ID)) changed = True if changed: self.stdout.write(self.style.SUCCESS('SocialApp successfully updated')) else: self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
Add management command for keeping helsinki SocialApp in sync
Add management command for keeping helsinki SocialApp in sync
Python
bsd-2-clause
City-of-Helsinki/django-helusers,City-of-Helsinki/django-helusers
Add management command for keeping helsinki SocialApp in sync
from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from allauth.socialaccount.models import SocialApp from helusers.providers.helsinki.provider import HelsinkiProvider class Command(BaseCommand): help = 'Create or update helusers allauth SocialApp' def handle(self, *args, **options): changed = False try: app = SocialApp.objects.get(provider=HelsinkiProvider.id) except SocialApp.DoesNotExist: app = SocialApp(provider=HelsinkiProvider.id) self.stdout.write(self.style.SUCCESS('Creating new SocialApp')) if not app.name: app.name = 'Helsingin kaupungin työntekijät' changed = True client_id = secret_key = None jwt_settings = getattr(settings, 'JWT_AUTH') if jwt_settings: client_id = jwt_settings.get('JWT_AUDIENCE') secret_key = jwt_settings.get('JWT_SECRET_KEY') if not client_id: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID") if not secret_key: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key") if app.client_id != client_id: changed = True app.client_id = client_id if app.secret != secret_key: changed = True app.secret = secret_key if changed: app.save() if not app.sites.exists(): app.sites.add(Site.objects.get(id=settings.SITE_ID)) changed = True if changed: self.stdout.write(self.style.SUCCESS('SocialApp successfully updated')) else: self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
<commit_before><commit_msg>Add management command for keeping helsinki SocialApp in sync<commit_after>
from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from allauth.socialaccount.models import SocialApp from helusers.providers.helsinki.provider import HelsinkiProvider class Command(BaseCommand): help = 'Create or update helusers allauth SocialApp' def handle(self, *args, **options): changed = False try: app = SocialApp.objects.get(provider=HelsinkiProvider.id) except SocialApp.DoesNotExist: app = SocialApp(provider=HelsinkiProvider.id) self.stdout.write(self.style.SUCCESS('Creating new SocialApp')) if not app.name: app.name = 'Helsingin kaupungin työntekijät' changed = True client_id = secret_key = None jwt_settings = getattr(settings, 'JWT_AUTH') if jwt_settings: client_id = jwt_settings.get('JWT_AUDIENCE') secret_key = jwt_settings.get('JWT_SECRET_KEY') if not client_id: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID") if not secret_key: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key") if app.client_id != client_id: changed = True app.client_id = client_id if app.secret != secret_key: changed = True app.secret = secret_key if changed: app.save() if not app.sites.exists(): app.sites.add(Site.objects.get(id=settings.SITE_ID)) changed = True if changed: self.stdout.write(self.style.SUCCESS('SocialApp successfully updated')) else: self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
Add management command for keeping helsinki SocialApp in syncfrom django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from allauth.socialaccount.models import SocialApp from helusers.providers.helsinki.provider import HelsinkiProvider class Command(BaseCommand): help = 'Create or update helusers allauth SocialApp' def handle(self, *args, **options): changed = False try: app = SocialApp.objects.get(provider=HelsinkiProvider.id) except SocialApp.DoesNotExist: app = SocialApp(provider=HelsinkiProvider.id) self.stdout.write(self.style.SUCCESS('Creating new SocialApp')) if not app.name: app.name = 'Helsingin kaupungin työntekijät' changed = True client_id = secret_key = None jwt_settings = getattr(settings, 'JWT_AUTH') if jwt_settings: client_id = jwt_settings.get('JWT_AUDIENCE') secret_key = jwt_settings.get('JWT_SECRET_KEY') if not client_id: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID") if not secret_key: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key") if app.client_id != client_id: changed = True app.client_id = client_id if app.secret != secret_key: changed = True app.secret = secret_key if changed: app.save() if not app.sites.exists(): app.sites.add(Site.objects.get(id=settings.SITE_ID)) changed = True if changed: self.stdout.write(self.style.SUCCESS('SocialApp successfully updated')) else: self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
<commit_before><commit_msg>Add management command for keeping helsinki SocialApp in sync<commit_after>from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.core.management.base import BaseCommand from django.contrib.sites.models import Site from allauth.socialaccount.models import SocialApp from helusers.providers.helsinki.provider import HelsinkiProvider class Command(BaseCommand): help = 'Create or update helusers allauth SocialApp' def handle(self, *args, **options): changed = False try: app = SocialApp.objects.get(provider=HelsinkiProvider.id) except SocialApp.DoesNotExist: app = SocialApp(provider=HelsinkiProvider.id) self.stdout.write(self.style.SUCCESS('Creating new SocialApp')) if not app.name: app.name = 'Helsingin kaupungin työntekijät' changed = True client_id = secret_key = None jwt_settings = getattr(settings, 'JWT_AUTH') if jwt_settings: client_id = jwt_settings.get('JWT_AUDIENCE') secret_key = jwt_settings.get('JWT_SECRET_KEY') if not client_id: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_AUDIENCE'] to correspond to your client ID") if not secret_key: raise ImproperlyConfigured("You must set JWT_AUTH['JWT_SECRET_KEY'] to correspond to your secret key") if app.client_id != client_id: changed = True app.client_id = client_id if app.secret != secret_key: changed = True app.secret = secret_key if changed: app.save() if not app.sites.exists(): app.sites.add(Site.objects.get(id=settings.SITE_ID)) changed = True if changed: self.stdout.write(self.style.SUCCESS('SocialApp successfully updated')) else: self.stdout.write(self.style.NOTICE('Already synced -- no changes needed'))
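For reference, the command above pulls its values from a JWT_AUTH dict in Django settings; a minimal sketch of the settings it expects, with placeholder values, is shown below. Note also that getattr(settings, 'JWT_AUTH') is called without a default, so if the setting is missing entirely the lookup raises AttributeError before the `if jwt_settings:` guard can run; passing None as a third argument would make that guard effective.

# settings.py -- illustrative placeholder values only
SITE_ID = 1

JWT_AUTH = {
    'JWT_AUDIENCE': 'my-client-id',     # synced to SocialApp.client_id
    'JWT_SECRET_KEY': 'my-secret-key',  # synced to SocialApp.secret
}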
b589e6791ffdb85dfb7f11b331f090e6a9a0d874
senlin/db/sqlalchemy/migrate_repo/versions/006_node_cluster_dependents_column.py
senlin/db/sqlalchemy/migrate_repo/versions/006_node_cluster_dependents_column.py
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table from senlin.db.sqlalchemy import types def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine node = Table('node', meta, autoload=True) node_dependents = Column('dependents', types.Dict()) node_dependents.create(node) cluster = Table('cluster', meta, autoload=True) cluster_dependents = Column('dependents', types.Dict()) cluster_dependents.create(cluster)
Add dependents column to node and cluster tables
Add dependents column to node and cluster tables This patch adds a new column 'dependents' to node and cluster tables. 'dependents' column will be used to store the relationship between vm node/cluster and container node/cluster. Change-Id: Ib4ebb3eafb986ed776dead5f616062606631190c
Python
apache-2.0
stackforge/senlin,openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin
Add dependents column to node and cluster tables This patch adds a new column 'dependents' to node and cluster tables. 'dependents' column will be used to store the relationship between vm node/cluster and container node/cluster. Change-Id: Ib4ebb3eafb986ed776dead5f616062606631190c
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table from senlin.db.sqlalchemy import types def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine node = Table('node', meta, autoload=True) node_dependents = Column('dependents', types.Dict()) node_dependents.create(node) cluster = Table('cluster', meta, autoload=True) cluster_dependents = Column('dependents', types.Dict()) cluster_dependents.create(cluster)
<commit_before><commit_msg>Add dependents column to node and cluster tables This patch adds a new column 'dependents' to node and cluster tables. 'dependents' column will be used to store the relationship between vm node/cluster and container node/cluster. Change-Id: Ib4ebb3eafb986ed776dead5f616062606631190c<commit_after>
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table from senlin.db.sqlalchemy import types def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine node = Table('node', meta, autoload=True) node_dependents = Column('dependents', types.Dict()) node_dependents.create(node) cluster = Table('cluster', meta, autoload=True) cluster_dependents = Column('dependents', types.Dict()) cluster_dependents.create(cluster)
Add dependents column to node and cluster tables This patch adds a new column 'dependents' to node and cluster tables. 'dependents' column will be used to store the relationship between vm node/cluster and container node/cluster. Change-Id: Ib4ebb3eafb986ed776dead5f616062606631190c# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table from senlin.db.sqlalchemy import types def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine node = Table('node', meta, autoload=True) node_dependents = Column('dependents', types.Dict()) node_dependents.create(node) cluster = Table('cluster', meta, autoload=True) cluster_dependents = Column('dependents', types.Dict()) cluster_dependents.create(cluster)
<commit_before><commit_msg>Add dependents column to node and cluster tables This patch adds a new column 'dependents' to node and cluster tables. 'dependents' column will be used to store the relationship between vm node/cluster and container node/cluster. Change-Id: Ib4ebb3eafb986ed776dead5f616062606631190c<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import Column, MetaData, Table from senlin.db.sqlalchemy import types def upgrade(migrate_engine): meta = MetaData() meta.bind = migrate_engine node = Table('node', meta, autoload=True) node_dependents = Column('dependents', types.Dict()) node_dependents.create(node) cluster = Table('cluster', meta, autoload=True) cluster_dependents = Column('dependents', types.Dict()) cluster_dependents.create(cluster)
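sqlalchemy-migrate scripts conventionally pair upgrade() with a downgrade(). The commit above ships only the upgrade; a hedged sketch of what the counterpart could look like (not part of the original change) uses the drop() method that migrate's changeset extension adds to reflected columns:

def downgrade(migrate_engine):
    meta = MetaData()
    meta.bind = migrate_engine

    # Drop the columns added by upgrade(), in reverse.
    node = Table('node', meta, autoload=True)
    node.c.dependents.drop()

    cluster = Table('cluster', meta, autoload=True)
    cluster.c.dependents.drop()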
3c627a70c53488526d11911b612ce524c3799092
src/dashboard/src/main/migrations/0052_correct_extract_packages_fallback_link.py
src/dashboard/src/main/migrations/0052_correct_extract_packages_fallback_link.py
# -*- coding: utf-8 -*-

"""0052_correct_extract_packages_fallback_link.py

Migration to ensure that if extracting contents from compressed archives
fails, the workflow doesn't continue to trudge along to completion, where
there is a likelihood of other errors.
"""

from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Update two fields in the Dashboard data model for the two affected
    transfer types.
    """
    std_transfer_extract_packages_link = "1cb7e228-6e94-4c93-bf70-430af99b9264"
    dspace_extract_packages_link = "bd792750-a55b-42e9-903a-8c898bb77df1"
    failed_transfer_link = "61c316a6-0a50-4f65-8767-1f44b1eeb6dd"

    MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
    MicroServiceChainLink.objects\
        .filter(id=dspace_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)
    MicroServiceChainLink.objects\
        .filter(id=std_transfer_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)


class Migration(migrations.Migration):
    """Entry point for the migration."""

    dependencies = [('main', '0051_remove_verify_premis_checksums')]

    operations = [
        migrations.RunPython(data_migration),
    ]
Update defaultNextChainLink for extract packages
Update defaultNextChainLink for extract packages There are two extract packages microservices which continue to process data even if they have failed to extract content, or the service itself has failed for some other reason. Make sure that the Task falls back to 'Failed Transfer' so that the user knows and a decision can be made based on the content of the transfer.
Python
agpl-3.0
artefactual/archivematica,artefactual/archivematica,artefactual/archivematica,artefactual/archivematica
Update defaultNextChainLink for extract packages There are two extract packages microservices which continue to process data even if they have failed to extract content, or the service itself has failed for some other reason. Make sure that the Task falls back to 'Failed Transfer' so that the user knows and a decision can be made based on the content of the transfer.
# -*- coding: utf-8 -*-

"""0052_correct_extract_packages_fallback_link.py

Migration to ensure that if extracting contents from compressed archives
fails, the workflow doesn't continue to trudge along to completion, where
there is a likelihood of other errors.
"""

from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Update two fields in the Dashboard data model for the two affected
    transfer types.
    """
    std_transfer_extract_packages_link = "1cb7e228-6e94-4c93-bf70-430af99b9264"
    dspace_extract_packages_link = "bd792750-a55b-42e9-903a-8c898bb77df1"
    failed_transfer_link = "61c316a6-0a50-4f65-8767-1f44b1eeb6dd"

    MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
    MicroServiceChainLink.objects\
        .filter(id=dspace_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)
    MicroServiceChainLink.objects\
        .filter(id=std_transfer_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)


class Migration(migrations.Migration):
    """Entry point for the migration."""

    dependencies = [('main', '0051_remove_verify_premis_checksums')]

    operations = [
        migrations.RunPython(data_migration),
    ]
<commit_before><commit_msg>Update defaultNextChainLink for extract packages There are two extract packages microservices which continue to process data even if they have failed to extract content, or the service itself has failed for some other reason. Make sure that the Task falls back to 'Failed Transfer' so that the user knows and a decision can be made based on the content of the transfer.<commit_after>
# -*- coding: utf-8 -*-

"""0052_correct_extract_packages_fallback_link.py

Migration to ensure that if extracting contents from compressed archives
fails, the workflow doesn't continue to trudge along to completion, where
there is a likelihood of other errors.
"""

from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Update two fields in the Dashboard data model for the two affected
    transfer types.
    """
    std_transfer_extract_packages_link = "1cb7e228-6e94-4c93-bf70-430af99b9264"
    dspace_extract_packages_link = "bd792750-a55b-42e9-903a-8c898bb77df1"
    failed_transfer_link = "61c316a6-0a50-4f65-8767-1f44b1eeb6dd"

    MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
    MicroServiceChainLink.objects\
        .filter(id=dspace_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)
    MicroServiceChainLink.objects\
        .filter(id=std_transfer_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)


class Migration(migrations.Migration):
    """Entry point for the migration."""

    dependencies = [('main', '0051_remove_verify_premis_checksums')]

    operations = [
        migrations.RunPython(data_migration),
    ]
Update defaultNextChainLink for extract packages There are two extract packages microservices which continue to process data even if they have failed to extract content, or the service itself has failed for some other reason. Make sure that the Task falls back to 'Failed Transfer' so that the user knows and a decision can be made based on the content of the transfer.# -*- coding: utf-8 -*-

"""0052_correct_extract_packages_fallback_link.py

Migration to ensure that if extracting contents from compressed archives
fails, the workflow doesn't continue to trudge along to completion, where
there is a likelihood of other errors.
"""

from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Update two fields in the Dashboard data model for the two affected
    transfer types.
    """
    std_transfer_extract_packages_link = "1cb7e228-6e94-4c93-bf70-430af99b9264"
    dspace_extract_packages_link = "bd792750-a55b-42e9-903a-8c898bb77df1"
    failed_transfer_link = "61c316a6-0a50-4f65-8767-1f44b1eeb6dd"

    MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
    MicroServiceChainLink.objects\
        .filter(id=dspace_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)
    MicroServiceChainLink.objects\
        .filter(id=std_transfer_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)


class Migration(migrations.Migration):
    """Entry point for the migration."""

    dependencies = [('main', '0051_remove_verify_premis_checksums')]

    operations = [
        migrations.RunPython(data_migration),
    ]
<commit_before><commit_msg>Update defaultNextChainLink for extract packages There are two extract packages microservices which continue to process data even if they have failed to extract content, or the service itself has failed for some other reason. Make sure that the Task falls back to 'Failed Transfer' so that the user knows and a decision can be made based on the content of the transfer.<commit_after># -*- coding: utf-8 -*-

"""0052_correct_extract_packages_fallback_link.py

Migration to ensure that if extracting contents from compressed archives
fails, the workflow doesn't continue to trudge along to completion, where
there is a likelihood of other errors.
"""

from __future__ import unicode_literals

from django.db import migrations


def data_migration(apps, schema_editor):
    """Update two fields in the Dashboard data model for the two affected
    transfer types.
    """
    std_transfer_extract_packages_link = "1cb7e228-6e94-4c93-bf70-430af99b9264"
    dspace_extract_packages_link = "bd792750-a55b-42e9-903a-8c898bb77df1"
    failed_transfer_link = "61c316a6-0a50-4f65-8767-1f44b1eeb6dd"

    MicroServiceChainLink = apps.get_model('main', 'MicroServiceChainLink')
    MicroServiceChainLink.objects\
        .filter(id=dspace_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)
    MicroServiceChainLink.objects\
        .filter(id=std_transfer_extract_packages_link)\
        .update(defaultnextchainlink=failed_transfer_link)


class Migration(migrations.Migration):
    """Entry point for the migration."""

    dependencies = [('main', '0051_remove_verify_premis_checksums')]

    operations = [
        migrations.RunPython(data_migration),
    ]
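One optional refinement, sketched here rather than taken from the commit: Django's RunPython accepts a second, reverse callable, so the operation could be declared reversible with a no-op (the old defaultnextchainlink values are not recorded anywhere, so they cannot actually be restored):

    operations = [
        # Hypothetical: a noop reverse lets `migrate main 0051` run
        # without trying to restore the previous link values.
        migrations.RunPython(data_migration, migrations.RunPython.noop),
    ]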
1ba2365678c3118b8b3e7629767bf2bfa4ec04e6
editorconfig.py
editorconfig.py
import os from ini import EditorConfigParser def get_filenames(path, filename): """Yield full filepath for filename in each directory in and above path""" while True: yield os.path.join(path, filename) newpath = os.path.dirname(path) if path == newpath: break path = newpath class EditorConfigHandler(object): """Allows locating and parsing of EditorConfig files for a given filename""" def __init__(self, filepath, conf_filename='.editorconfig'): """Create EditorConfigHandler for matching given filepath""" self.filepath = filepath self.conf_filename = conf_filename self.options = None def preprocess_values(self): opts = self.options # Lowercase option value for certain options for name in ["end_of_line", "indent_style", "indent_size"]: if name in opts: opts[name] = opts[name].lower() # Set indent_size to "tab" if indent_size is unspecified and # indent_style is set to "tab". if (opts.get("indent_style") == "tab" and not opts.has_key("indent_size")): opts["indent_size"] = "tab" # Set tab_width to indent_size if indent_size is specified and tab_width # is unspecified if (opts.has_key("indent_size") and not opts.has_key("tab_width") and opts["indent_size"] != "tab"): opts["tab_width"] = opts["indent_size"] def get_configurations(self): """Find EditorConfig files and return all options matching filepath""" path, filename = os.path.split(self.filepath) conf_files = get_filenames(path, self.conf_filename) for filename in conf_files: parser = EditorConfigParser(self.filepath) parser.read(filename) old_options = self.options self.options = parser.options if old_options: self.options.update(old_options) if parser.root_file: break self.preprocess_values() return self.options
Add class to locate and parse EditorConfig files
Add class to locate and parse EditorConfig files
Python
bsd-2-clause
dublebuble/editorconfig-gedit,dublebuble/editorconfig-gedit,pocke/editorconfig-vim,benjifisher/editorconfig-vim,johnfraney/editorconfig-vim,benjifisher/editorconfig-vim,johnfraney/editorconfig-vim,VictorBjelkholm/editorconfig-vim,VictorBjelkholm/editorconfig-vim,pocke/editorconfig-vim,VictorBjelkholm/editorconfig-vim,dublebuble/editorconfig-gedit,pocke/editorconfig-vim,benjifisher/editorconfig-vim,johnfraney/editorconfig-vim
Add class to locate and parse EditorConfig files
import os from ini import EditorConfigParser def get_filenames(path, filename): """Yield full filepath for filename in each directory in and above path""" while True: yield os.path.join(path, filename) newpath = os.path.dirname(path) if path == newpath: break path = newpath class EditorConfigHandler(object): """Allows locating and parsing of EditorConfig files for a given filename""" def __init__(self, filepath, conf_filename='.editorconfig'): """Create EditorConfigHandler for matching given filepath""" self.filepath = filepath self.conf_filename = conf_filename self.options = None def preprocess_values(self): opts = self.options # Lowercase option value for certain options for name in ["end_of_line", "indent_style", "indent_size"]: if name in opts: opts[name] = opts[name].lower() # Set indent_size to "tab" if indent_size is unspecified and # indent_style is set to "tab". if (opts.get("indent_style") == "tab" and not opts.has_key("indent_size")): opts["indent_size"] = "tab" # Set tab_width to indent_size if indent_size is specified and tab_width # is unspecified if (opts.has_key("indent_size") and not opts.has_key("tab_width") and opts["indent_size"] != "tab"): opts["tab_width"] = opts["indent_size"] def get_configurations(self): """Find EditorConfig files and return all options matching filepath""" path, filename = os.path.split(self.filepath) conf_files = get_filenames(path, self.conf_filename) for filename in conf_files: parser = EditorConfigParser(self.filepath) parser.read(filename) old_options = self.options self.options = parser.options if old_options: self.options.update(old_options) if parser.root_file: break self.preprocess_values() return self.options
<commit_before><commit_msg>Add class to locate and parse EditorConfig files<commit_after>
import os from ini import EditorConfigParser def get_filenames(path, filename): """Yield full filepath for filename in each directory in and above path""" while True: yield os.path.join(path, filename) newpath = os.path.dirname(path) if path == newpath: break path = newpath class EditorConfigHandler(object): """Allows locating and parsing of EditorConfig files for a given filename""" def __init__(self, filepath, conf_filename='.editorconfig'): """Create EditorConfigHandler for matching given filepath""" self.filepath = filepath self.conf_filename = conf_filename self.options = None def preprocess_values(self): opts = self.options # Lowercase option value for certain options for name in ["end_of_line", "indent_style", "indent_size"]: if name in opts: opts[name] = opts[name].lower() # Set indent_size to "tab" if indent_size is unspecified and # indent_style is set to "tab". if (opts.get("indent_style") == "tab" and not opts.has_key("indent_size")): opts["indent_size"] = "tab" # Set tab_width to indent_size if indent_size is specified and tab_width # is unspecified if (opts.has_key("indent_size") and not opts.has_key("tab_width") and opts["indent_size"] != "tab"): opts["tab_width"] = opts["indent_size"] def get_configurations(self): """Find EditorConfig files and return all options matching filepath""" path, filename = os.path.split(self.filepath) conf_files = get_filenames(path, self.conf_filename) for filename in conf_files: parser = EditorConfigParser(self.filepath) parser.read(filename) old_options = self.options self.options = parser.options if old_options: self.options.update(old_options) if parser.root_file: break self.preprocess_values() return self.options
Add class to locate and parse EditorConfig filesimport os from ini import EditorConfigParser def get_filenames(path, filename): """Yield full filepath for filename in each directory in and above path""" while True: yield os.path.join(path, filename) newpath = os.path.dirname(path) if path == newpath: break path = newpath class EditorConfigHandler(object): """Allows locating and parsing of EditorConfig files for a given filename""" def __init__(self, filepath, conf_filename='.editorconfig'): """Create EditorConfigHandler for matching given filepath""" self.filepath = filepath self.conf_filename = conf_filename self.options = None def preprocess_values(self): opts = self.options # Lowercase option value for certain options for name in ["end_of_line", "indent_style", "indent_size"]: if name in opts: opts[name] = opts[name].lower() # Set indent_size to "tab" if indent_size is unspecified and # indent_style is set to "tab". if (opts.get("indent_style") == "tab" and not opts.has_key("indent_size")): opts["indent_size"] = "tab" # Set tab_width to indent_size if indent_size is specified and tab_width # is unspecified if (opts.has_key("indent_size") and not opts.has_key("tab_width") and opts["indent_size"] != "tab"): opts["tab_width"] = opts["indent_size"] def get_configurations(self): """Find EditorConfig files and return all options matching filepath""" path, filename = os.path.split(self.filepath) conf_files = get_filenames(path, self.conf_filename) for filename in conf_files: parser = EditorConfigParser(self.filepath) parser.read(filename) old_options = self.options self.options = parser.options if old_options: self.options.update(old_options) if parser.root_file: break self.preprocess_values() return self.options
<commit_before><commit_msg>Add class to locate and parse EditorConfig files<commit_after>import os from ini import EditorConfigParser def get_filenames(path, filename): """Yield full filepath for filename in each directory in and above path""" while True: yield os.path.join(path, filename) newpath = os.path.dirname(path) if path == newpath: break path = newpath class EditorConfigHandler(object): """Allows locating and parsing of EditorConfig files for a given filename""" def __init__(self, filepath, conf_filename='.editorconfig'): """Create EditorConfigHandler for matching given filepath""" self.filepath = filepath self.conf_filename = conf_filename self.options = None def preprocess_values(self): opts = self.options # Lowercase option value for certain options for name in ["end_of_line", "indent_style", "indent_size"]: if name in opts: opts[name] = opts[name].lower() # Set indent_size to "tab" if indent_size is unspecified and # indent_style is set to "tab". if (opts.get("indent_style") == "tab" and not opts.has_key("indent_size")): opts["indent_size"] = "tab" # Set tab_width to indent_size if indent_size is specified and tab_width # is unspecified if (opts.has_key("indent_size") and not opts.has_key("tab_width") and opts["indent_size"] != "tab"): opts["tab_width"] = opts["indent_size"] def get_configurations(self): """Find EditorConfig files and return all options matching filepath""" path, filename = os.path.split(self.filepath) conf_files = get_filenames(path, self.conf_filename) for filename in conf_files: parser = EditorConfigParser(self.filepath) parser.read(filename) old_options = self.options self.options = parser.options if old_options: self.options.update(old_options) if parser.root_file: break self.preprocess_values() return self.options
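A minimal usage sketch for the class above, assuming a .editorconfig file exists somewhere at or above the given (hypothetical) path; written Python 2 style to match the has_key calls in the original:

# Hypothetical usage: resolve EditorConfig options for one file
handler = EditorConfigHandler('/home/user/project/src/main.py')
options = handler.get_configurations()
print options  # e.g. {'indent_style': 'space', 'indent_size': '4', 'tab_width': '4'}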
8e195ab55be87cdd300c71b00d2dc64bb0d6c3ca
app/admin/forms.py
app/admin/forms.py
from flask.ext.wtf import Form from wtforms import StringField, TextAreaField, SubmitField from wtforms.validators import Required, Length, Email, Optional class ProfileForm(Form): name = StringField('Nafn', validators=[Optional(), Length(1,64)]) location = StringField('Staðsetning', validators=[Optional(), Length(1,64)]) bio = TextAreaField('Um', validators=[Optional()]) submit = SubmitField('Breyta')
Add a ProfileForm to modify the user profile
Add a ProfileForm to modify the user profile
Python
mit
finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is,finnurtorfa/aflafrettir.is
Add a ProfileForm to modify the user profile
from flask.ext.wtf import Form from wtforms import StringField, TextAreaField, SubmitField from wtforms.validators import Required, Length, Email, Optional class ProfileForm(Form): name = StringField('Nafn', validators=[Optional(), Length(1,64)]) location = StringField('Staðsetning', validators=[Optional(), Length(1,64)]) bio = TextAreaField('Um', validators=[Optional()]) submit = SubmitField('Breyta')
<commit_before><commit_msg>Add a ProfileForm to modify the user profile<commit_after>
from flask.ext.wtf import Form from wtforms import StringField, TextAreaField, SubmitField from wtforms.validators import Required, Length, Email, Optional class ProfileForm(Form): name = StringField('Nafn', validators=[Optional(), Length(1,64)]) location = StringField('Staðsetning', validators=[Optional(), Length(1,64)]) bio = TextAreaField('Um', validators=[Optional()]) submit = SubmitField('Breyta')
Add a ProfileForm to modify the user profilefrom flask.ext.wtf import Form from wtforms import StringField, TextAreaField, SubmitField from wtforms.validators import Required, Length, Email, Optional class ProfileForm(Form): name = StringField('Nafn', validators=[Optional(), Length(1,64)]) location = StringField('Staðsetning', validators=[Optional(), Length(1,64)]) bio = TextAreaField('Um', validators=[Optional()]) submit = SubmitField('Breyta')
<commit_before><commit_msg>Add a ProfileForm to modify the user profile<commit_after>from flask.ext.wtf import Form from wtforms import StringField, TextAreaField, SubmitField from wtforms.validators import Required, Length, Email, Optional class ProfileForm(Form): name = StringField('Nafn', validators=[Optional(), Length(1,64)]) location = StringField('Staðsetning', validators=[Optional(), Length(1,64)]) bio = TextAreaField('Um', validators=[Optional()]) submit = SubmitField('Breyta')
a8d8027b8610d5f710638511cd0adb277de381c2
solutions/uppg2.py
solutions/uppg2.py
# coding: utf-8 def load_book(path): d = {} for rad in open('bok.txt'): (namn, _, nummer) = rad.partition(';') d[namn] = nummer.strip() return d def save_book(book, path): f = open('bok.txt', 'w') for name in book: f.write('%s;%s\n' % (name, book[name])) def print_menu(bok): print "\n** Meny **" print "Jag känner till %d telefonnummer." % len(bok) print "1. Slå upp telefonnummer" print "2. Lägg till nytt telefonnummer" print "3. Avsluta" def do_lookup(book): namn = raw_input("Ange namn: ") uppslag = namn.lower() if uppslag in book: print "%ss telefonnummer är: %s" % (namn, book[uppslag]) else: print "Hittar inte %s i telefonboken!" % namn def do_add_number(book, path): nummer = raw_input("Nytt nummer: ") namn = raw_input("Nytt namn: ") uppslag = namn.lower() book[uppslag] = nummer save_book(book, path) def run(): print "Välkommen till Telefonboken!" book = load_book('bok.txt') while True: print_menu(book) choice = int(raw_input("Ange val: ")) if choice == 0: print book if choice not in [1, 2, 3]: print "Det finns inget sådant val!" continue if choice == 1: do_lookup(book) if choice == 2: do_add_number(book, 'bok.txt') if choice == 3: break if __name__ == '__main__': run()
Add solution to problem 2
Add solution to problem 2
Python
mit
objarni/telefonboken
Add solution to problem 2
# coding: utf-8 def load_book(path): d = {} for rad in open('bok.txt'): (namn, _, nummer) = rad.partition(';') d[namn] = nummer.strip() return d def save_book(book, path): f = open('bok.txt', 'w') for name in book: f.write('%s;%s\n' % (name, book[name])) def print_menu(bok): print "\n** Meny **" print "Jag känner till %d telefonnummer." % len(bok) print "1. Slå upp telefonnummer" print "2. Lägg till nytt telefonnummer" print "3. Avsluta" def do_lookup(book): namn = raw_input("Ange namn: ") uppslag = namn.lower() if uppslag in book: print "%ss telefonnummer är: %s" % (namn, book[uppslag]) else: print "Hittar inte %s i telefonboken!" % namn def do_add_number(book, path): nummer = raw_input("Nytt nummer: ") namn = raw_input("Nytt namn: ") uppslag = namn.lower() book[uppslag] = nummer save_book(book, path) def run(): print "Välkommen till Telefonboken!" book = load_book('bok.txt') while True: print_menu(book) choice = int(raw_input("Ange val: ")) if choice == 0: print book if choice not in [1, 2, 3]: print "Det finns inget sådant val!" continue if choice == 1: do_lookup(book) if choice == 2: do_add_number(book, 'bok.txt') if choice == 3: break if __name__ == '__main__': run()
<commit_before><commit_msg>Add solution to problem 2<commit_after>
# coding: utf-8 def load_book(path): d = {} for rad in open('bok.txt'): (namn, _, nummer) = rad.partition(';') d[namn] = nummer.strip() return d def save_book(book, path): f = open('bok.txt', 'w') for name in book: f.write('%s;%s\n' % (name, book[name])) def print_menu(bok): print "\n** Meny **" print "Jag känner till %d telefonnummer." % len(bok) print "1. Slå upp telefonnummer" print "2. Lägg till nytt telefonnummer" print "3. Avsluta" def do_lookup(book): namn = raw_input("Ange namn: ") uppslag = namn.lower() if uppslag in book: print "%ss telefonnummer är: %s" % (namn, book[uppslag]) else: print "Hittar inte %s i telefonboken!" % namn def do_add_number(book, path): nummer = raw_input("Nytt nummer: ") namn = raw_input("Nytt namn: ") uppslag = namn.lower() book[uppslag] = nummer save_book(book, path) def run(): print "Välkommen till Telefonboken!" book = load_book('bok.txt') while True: print_menu(book) choice = int(raw_input("Ange val: ")) if choice == 0: print book if choice not in [1, 2, 3]: print "Det finns inget sådant val!" continue if choice == 1: do_lookup(book) if choice == 2: do_add_number(book, 'bok.txt') if choice == 3: break if __name__ == '__main__': run()
Add solution to problem 2# coding: utf-8 def load_book(path): d = {} for rad in open('bok.txt'): (namn, _, nummer) = rad.partition(';') d[namn] = nummer.strip() return d def save_book(book, path): f = open('bok.txt', 'w') for name in book: f.write('%s;%s\n' % (name, book[name])) def print_menu(bok): print "\n** Meny **" print "Jag känner till %d telefonnummer." % len(bok) print "1. Slå upp telefonnummer" print "2. Lägg till nytt telefonnummer" print "3. Avsluta" def do_lookup(book): namn = raw_input("Ange namn: ") uppslag = namn.lower() if uppslag in book: print "%ss telefonnummer är: %s" % (namn, book[uppslag]) else: print "Hittar inte %s i telefonboken!" % namn def do_add_number(book, path): nummer = raw_input("Nytt nummer: ") namn = raw_input("Nytt namn: ") uppslag = namn.lower() book[uppslag] = nummer save_book(book, path) def run(): print "Välkommen till Telefonboken!" book = load_book('bok.txt') while True: print_menu(book) choice = int(raw_input("Ange val: ")) if choice == 0: print book if choice not in [1, 2, 3]: print "Det finns inget sådant val!" continue if choice == 1: do_lookup(book) if choice == 2: do_add_number(book, 'bok.txt') if choice == 3: break if __name__ == '__main__': run()
<commit_before><commit_msg>Add solution to problem 2<commit_after># coding: utf-8 def load_book(path): d = {} for rad in open('bok.txt'): (namn, _, nummer) = rad.partition(';') d[namn] = nummer.strip() return d def save_book(book, path): f = open('bok.txt', 'w') for name in book: f.write('%s;%s\n' % (name, book[name])) def print_menu(bok): print "\n** Meny **" print "Jag känner till %d telefonnummer." % len(bok) print "1. Slå upp telefonnummer" print "2. Lägg till nytt telefonnummer" print "3. Avsluta" def do_lookup(book): namn = raw_input("Ange namn: ") uppslag = namn.lower() if uppslag in book: print "%ss telefonnummer är: %s" % (namn, book[uppslag]) else: print "Hittar inte %s i telefonboken!" % namn def do_add_number(book, path): nummer = raw_input("Nytt nummer: ") namn = raw_input("Nytt namn: ") uppslag = namn.lower() book[uppslag] = nummer save_book(book, path) def run(): print "Välkommen till Telefonboken!" book = load_book('bok.txt') while True: print_menu(book) choice = int(raw_input("Ange val: ")) if choice == 0: print book if choice not in [1, 2, 3]: print "Det finns inget sådant val!" continue if choice == 1: do_lookup(book) if choice == 2: do_add_number(book, 'bok.txt') if choice == 3: break if __name__ == '__main__': run()
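For context, the phonebook file that the solution above reads and writes holds one semicolon-separated name;number pair per line, so a hypothetical bok.txt could look like the two lines below. (Worth noting: load_book and save_book both take a path argument but currently hardcode 'bok.txt' when opening the file.)

anna;0701234567
bertil;0317654321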
f486504e4ae3758fee37ac2f94dc289c861ebcdd
data/.scripts/parse_foreign_languages_listing.py
data/.scripts/parse_foreign_languages_listing.py
#!/usr/bin/env python3 import bs4 from bs4 import BeautifulSoup from collections import defaultdict def get_table_tbody(soup): return soup.body.table.tbody.contents def extract_course_from_tr(tr): tds = tr.find_all('td') if tds[0].a is None: return None return { 'courseNumber': tds[0].a.string, 'title': tds[1].string } def extract_courses_from_soup(soup): tbody = get_table_tbody(soup) courses = [] for tr in tbody: if type(tr) == bs4.element.Tag: course = extract_course_from_tr(tr) if course is not None: courses.append(course) return courses def main(document): soup = BeautifulSoup(document) courses = extract_courses_from_soup(soup) filtered_courses = filter(lambda c: c['title'].startswith('Elementary'), courses) course_numbers = [c['courseNumber'].replace(u'\xa0', ' ') for c in filtered_courses] subject_areas = defaultdict(list) for cn in course_numbers: sa, n = cn.split(' ') subject_areas[sa].append(n) return subject_areas
Add script for parsing foreign language listing
Add script for parsing foreign language listing
Python
bsd-2-clause
mDibyo/berkeley-scheduler,mDibyo/berkeley-scheduler,mDibyo/berkeley-scheduler,mDibyo/berkeley-scheduler,mDibyo/berkeley-scheduler
Add script for parsing foreign language listing
#!/usr/bin/env python3 import bs4 from bs4 import BeautifulSoup from collections import defaultdict def get_table_tbody(soup): return soup.body.table.tbody.contents def extract_course_from_tr(tr): tds = tr.find_all('td') if tds[0].a is None: return None return { 'courseNumber': tds[0].a.string, 'title': tds[1].string } def extract_courses_from_soup(soup): tbody = get_table_tbody(soup) courses = [] for tr in tbody: if type(tr) == bs4.element.Tag: course = extract_course_from_tr(tr) if course is not None: courses.append(course) return courses def main(document): soup = BeautifulSoup(document) courses = extract_courses_from_soup(soup) filtered_courses = filter(lambda c: c['title'].startswith('Elementary'), courses) course_numbers = [c['courseNumber'].replace(u'\xa0', ' ') for c in filtered_courses] subject_areas = defaultdict(list) for cn in course_numbers: sa, n = cn.split(' ') subject_areas[sa].append(n) return subject_areas
<commit_before><commit_msg>Add script for parsing foreign language listing<commit_after>
#!/usr/bin/env python3 import bs4 from bs4 import BeautifulSoup from collections import defaultdict def get_table_tbody(soup): return soup.body.table.tbody.contents def extract_course_from_tr(tr): tds = tr.find_all('td') if tds[0].a is None: return None return { 'courseNumber': tds[0].a.string, 'title': tds[1].string } def extract_courses_from_soup(soup): tbody = get_table_tbody(soup) courses = [] for tr in tbody: if type(tr) == bs4.element.Tag: course = extract_course_from_tr(tr) if course is not None: courses.append(course) return courses def main(document): soup = BeautifulSoup(document) courses = extract_courses_from_soup(soup) filtered_courses = filter(lambda c: c['title'].startswith('Elementary'), courses) course_numbers = [c['courseNumber'].replace(u'\xa0', ' ') for c in filtered_courses] subject_areas = defaultdict(list) for cn in course_numbers: sa, n = cn.split(' ') subject_areas[sa].append(n) return subject_areas
Add script for parsing foreign language listing#!/usr/bin/env python3 import bs4 from bs4 import BeautifulSoup from collections import defaultdict def get_table_tbody(soup): return soup.body.table.tbody.contents def extract_course_from_tr(tr): tds = tr.find_all('td') if tds[0].a is None: return None return { 'courseNumber': tds[0].a.string, 'title': tds[1].string } def extract_courses_from_soup(soup): tbody = get_table_tbody(soup) courses = [] for tr in tbody: if type(tr) == bs4.element.Tag: course = extract_course_from_tr(tr) if course is not None: courses.append(course) return courses def main(document): soup = BeautifulSoup(document) courses = extract_courses_from_soup(soup) filtered_courses = filter(lambda c: c['title'].startswith('Elementary'), courses) course_numbers = [c['courseNumber'].replace(u'\xa0', ' ') for c in filtered_courses] subject_areas = defaultdict(list) for cn in course_numbers: sa, n = cn.split(' ') subject_areas[sa].append(n) return subject_areas
<commit_before><commit_msg>Add script for parsing foreign language listing<commit_after>#!/usr/bin/env python3 import bs4 from bs4 import BeautifulSoup from collections import defaultdict def get_table_tbody(soup): return soup.body.table.tbody.contents def extract_course_from_tr(tr): tds = tr.find_all('td') if tds[0].a is None: return None return { 'courseNumber': tds[0].a.string, 'title': tds[1].string } def extract_courses_from_soup(soup): tbody = get_table_tbody(soup) courses = [] for tr in tbody: if type(tr) == bs4.element.Tag: course = extract_course_from_tr(tr) if course is not None: courses.append(course) return courses def main(document): soup = BeautifulSoup(document) courses = extract_courses_from_soup(soup) filtered_courses = filter(lambda c: c['title'].startswith('Elementary'), courses) course_numbers = [c['courseNumber'].replace(u'\xa0', ' ') for c in filtered_courses] subject_areas = defaultdict(list) for cn in course_numbers: sa, n = cn.split(' ') subject_areas[sa].append(n) return subject_areas
5d2f96836d0d63c68eb3acd5b8477cef231c5746
json2csv_business.py
json2csv_business.py
import json def main(): # print the header of output csv file print 'business_id,city,latitude,longitude' # for each entry in input json file print one csv row for line in open("data/yelp_academic_dataset_business.json"): input_json = json.loads(line) business_id = input_json['business_id'] city = input_json['city'].encode('ascii', 'ignore') latitude = str(input_json['latitude']) longitude = str(input_json['longitude']) print business_id + ',' + city + ',' + latitude + ',' + longitude if __name__ == "__main__": main()
Add conversion from json to csv format for businesses
Add conversion from json to csv format for businesses
Python
mit
aysent/yelp-photo-explorer
Add conversion from json to csv format for businesses
import json def main(): # print the header of output csv file print 'business_id,city,latitude,longitude' # for each entry in input json file print one csv row for line in open("data/yelp_academic_dataset_business.json"): input_json = json.loads(line) business_id = input_json['business_id'] city = input_json['city'].encode('ascii', 'ignore') latitude = str(input_json['latitude']) longitude = str(input_json['longitude']) print business_id + ',' + city + ',' + latitude + ',' + longitude if __name__ == "__main__": main()
<commit_before><commit_msg>Add conversion from json to csv format for businesses<commit_after>
import json def main(): # print the header of output csv file print 'business_id,city,latitude,longitude' # for each entry in input json file print one csv row for line in open("data/yelp_academic_dataset_business.json"): input_json = json.loads(line) business_id = input_json['business_id'] city = input_json['city'].encode('ascii', 'ignore') latitude = str(input_json['latitude']) longitude = str(input_json['longitude']) print business_id + ',' + city + ',' + latitude + ',' + longitude if __name__ == "__main__": main()
Add conversion from json to csv format for businessesimport json def main(): # print the header of output csv file print 'business_id,city,latitude,longitude' # for each entry in input json file print one csv row for line in open("data/yelp_academic_dataset_business.json"): input_json = json.loads(line) business_id = input_json['business_id'] city = input_json['city'].encode('ascii', 'ignore') latitude = str(input_json['latitude']) longitude = str(input_json['longitude']) print business_id + ',' + city + ',' + latitude + ',' + longitude if __name__ == "__main__": main()
<commit_before><commit_msg>Add conversion from json to csv format for businesses<commit_after>import json def main(): # print the header of output csv file print 'business_id,city,latitude,longitude' # for each entry in input json file print one csv row for line in open("data/yelp_academic_dataset_business.json"): input_json = json.loads(line) business_id = input_json['business_id'] city = input_json['city'].encode('ascii', 'ignore') latitude = str(input_json['latitude']) longitude = str(input_json['longitude']) print business_id + ',' + city + ',' + latitude + ',' + longitude if __name__ == "__main__": main()
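One caveat about the script above: it joins fields with ',' by hand, so a city name containing a comma (or a quote) would yield a malformed row. A hedged alternative sketch using the standard csv module, which quotes such values automatically:

import csv
import json
import sys

# Same extraction as above, but with proper CSV quoting via csv.writer.
writer = csv.writer(sys.stdout)
writer.writerow(['business_id', 'city', 'latitude', 'longitude'])
for line in open('data/yelp_academic_dataset_business.json'):
    record = json.loads(line)
    writer.writerow([record['business_id'],
                     record['city'].encode('ascii', 'ignore'),
                     record['latitude'],
                     record['longitude']])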
fed96a269df3f36deb1f37b80e0eb85f6abbb030
extract-patterns.py
extract-patterns.py
#!/usr/bin/env python """ Usage: extract-patterns.py FILE Print patterns in the structure of parse trees in FILE """ from sexp import sexps, tokenize, isterminal import re import sys filename = sys.argv[1] def remove_coindex(s): return re.sub('-\d+$', '', s) def get_verb(sexp): if isterminal(sexp): if sexp[0][0] == 'V': return sexp[1] else: vs = [get_verb(c) for c in sexp[1:] if c[0][0] == 'V'] if vs: return vs[-1] def isreportverb(vb): return vb in ["say", "says", "said", "announced", "announce", "announces"] def format_constituent(sexp): if isterminal(sexp): return '{1}/{0}'.format(*sexp) else: l = remove_coindex(sexp[0]) v = get_verb(sexp) if isreportverb(v): return '{0}/{1}'.format(l, v) else: return l for i,sexp in enumerate(sexps(tokenize(open(filename)))): if isinstance(sexp, list) and len(sexp) == 1: sexp = sexp[0] assert(isinstance(sexp[0], str)) children = sexp[1:] print( sexp[0], '=>', ' '.join( format_constituent(c) #('{1}/{0}'.format(*c) if isterminal(c) else remove_coindex(c[0])) for c in children ) )
Add improved script for extracting sentence patts
Add improved script for extracting sentence patts
Python
cc0-1.0
cordarei/ptb.py
Add improved script for extracting sentence patts
#!/usr/bin/env python """ Usage: extract-patterns.py FILE Print patterns in the structure of parse trees in FILE """ from sexp import sexps, tokenize, isterminal import re import sys filename = sys.argv[1] def remove_coindex(s): return re.sub('-\d+$', '', s) def get_verb(sexp): if isterminal(sexp): if sexp[0][0] == 'V': return sexp[1] else: vs = [get_verb(c) for c in sexp[1:] if c[0][0] == 'V'] if vs: return vs[-1] def isreportverb(vb): return vb in ["say", "says", "said", "announced", "announce", "announces"] def format_constituent(sexp): if isterminal(sexp): return '{1}/{0}'.format(*sexp) else: l = remove_coindex(sexp[0]) v = get_verb(sexp) if isreportverb(v): return '{0}/{1}'.format(l, v) else: return l for i,sexp in enumerate(sexps(tokenize(open(filename)))): if isinstance(sexp, list) and len(sexp) == 1: sexp = sexp[0] assert(isinstance(sexp[0], str)) children = sexp[1:] print( sexp[0], '=>', ' '.join( format_constituent(c) #('{1}/{0}'.format(*c) if isterminal(c) else remove_coindex(c[0])) for c in children ) )
<commit_before><commit_msg>Add improved script for extracting sentence patts<commit_after>
#!/usr/bin/env python """ Usage: extract-patterns.py FILE Print patterns in the structure of parse trees in FILE """ from sexp import sexps, tokenize, isterminal import re import sys filename = sys.argv[1] def remove_coindex(s): return re.sub('-\d+$', '', s) def get_verb(sexp): if isterminal(sexp): if sexp[0][0] == 'V': return sexp[1] else: vs = [get_verb(c) for c in sexp[1:] if c[0][0] == 'V'] if vs: return vs[-1] def isreportverb(vb): return vb in ["say", "says", "said", "announced", "announce", "announces"] def format_constituent(sexp): if isterminal(sexp): return '{1}/{0}'.format(*sexp) else: l = remove_coindex(sexp[0]) v = get_verb(sexp) if isreportverb(v): return '{0}/{1}'.format(l, v) else: return l for i,sexp in enumerate(sexps(tokenize(open(filename)))): if isinstance(sexp, list) and len(sexp) == 1: sexp = sexp[0] assert(isinstance(sexp[0], str)) children = sexp[1:] print( sexp[0], '=>', ' '.join( format_constituent(c) #('{1}/{0}'.format(*c) if isterminal(c) else remove_coindex(c[0])) for c in children ) )
Add improved script for extracting sentence patts#!/usr/bin/env python """ Usage: extract-patterns.py FILE Print patterns in the structure of parse trees in FILE """ from sexp import sexps, tokenize, isterminal import re import sys filename = sys.argv[1] def remove_coindex(s): return re.sub('-\d+$', '', s) def get_verb(sexp): if isterminal(sexp): if sexp[0][0] == 'V': return sexp[1] else: vs = [get_verb(c) for c in sexp[1:] if c[0][0] == 'V'] if vs: return vs[-1] def isreportverb(vb): return vb in ["say", "says", "said", "announced", "announce", "announces"] def format_constituent(sexp): if isterminal(sexp): return '{1}/{0}'.format(*sexp) else: l = remove_coindex(sexp[0]) v = get_verb(sexp) if isreportverb(v): return '{0}/{1}'.format(l, v) else: return l for i,sexp in enumerate(sexps(tokenize(open(filename)))): if isinstance(sexp, list) and len(sexp) == 1: sexp = sexp[0] assert(isinstance(sexp[0], str)) children = sexp[1:] print( sexp[0], '=>', ' '.join( format_constituent(c) #('{1}/{0}'.format(*c) if isterminal(c) else remove_coindex(c[0])) for c in children ) )
<commit_before><commit_msg>Add improved script for extracting sentence patts<commit_after>#!/usr/bin/env python """ Usage: extract-patterns.py FILE Print patterns in the structure of parse trees in FILE """ from sexp import sexps, tokenize, isterminal import re import sys filename = sys.argv[1] def remove_coindex(s): return re.sub('-\d+$', '', s) def get_verb(sexp): if isterminal(sexp): if sexp[0][0] == 'V': return sexp[1] else: vs = [get_verb(c) for c in sexp[1:] if c[0][0] == 'V'] if vs: return vs[-1] def isreportverb(vb): return vb in ["say", "says", "said", "announced", "announce", "announces"] def format_constituent(sexp): if isterminal(sexp): return '{1}/{0}'.format(*sexp) else: l = remove_coindex(sexp[0]) v = get_verb(sexp) if isreportverb(v): return '{0}/{1}'.format(l, v) else: return l for i,sexp in enumerate(sexps(tokenize(open(filename)))): if isinstance(sexp, list) and len(sexp) == 1: sexp = sexp[0] assert(isinstance(sexp[0], str)) children = sexp[1:] print( sexp[0], '=>', ' '.join( format_constituent(c) #('{1}/{0}'.format(*c) if isterminal(c) else remove_coindex(c[0])) for c in children ) )
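extract-patterns.py depends on a local sexp module that the commit does not include. Judging only from how sexps, tokenize and isterminal are called above, a compatible implementation could look like this sketch (a guess at the interface, not the project's actual code):

import re

def tokenize(lines):
    # split a Penn-Treebank-style file into '(', ')' and atom tokens
    for line in lines:
        for token in re.findall(r'\(|\)|[^\s()]+', line):
            yield token

def sexps(tokens):
    # build nested lists from the flat token stream, yielding one
    # tree per top-level s-expression
    stack = [[]]
    for token in tokens:
        if token == '(':
            stack.append([])
        elif token == ')':
            finished = stack.pop()
            stack[-1].append(finished)
            if len(stack) == 1:
                yield stack[0].pop()
        else:
            stack[-1].append(token)

def isterminal(sexp):
    # a terminal is a (TAG word) pair such as ['VBD', 'said']
    return (len(sexp) == 2
            and isinstance(sexp[0], str)
            and isinstance(sexp[1], str))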
14d6748e26c86c887cddc2ff0eb716c9db63a766
kNN.py
kNN.py
""" The program to classify the randomly generated data using the k-Nearest Neighbours algorithm """ import sys import numpy as np import math def loadData(fname='train.txt'): """ Function to load the randomly generated data Keyword arguments: fname -- The file name to load the data in to the numpy array from (default "train.txt") """ try: data = np.loadtxt(fname, delimiter=',') except Exception as e: sys.exit("Could not open file!!") np.random.shuffle(data) num_cols = data.shape[1] y = data[:, num_cols-1] X = np.ones(shape=(len(y),num_cols-1)) for i in range(num_cols-1): X[:,i] = data[:,i] X = X.astype(float) return X, y def euclidean_distance(x1, x2): """ Function to calculate the Euclidean distance between 2 data points Keyword arguments: x1 -- the first data point x2 -- the second data point """ distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def main(): train_name = raw_input("Please enter the name of the file containing the \ training data\n") test_name = raw_input("Please enter the name of the file containing the \ testing data\n") train_X, train_y = loadData(fname=train_name) test_X, test_y = loadData(fname=test_name) try: k = int(raw_input("Please enter k, the number of neighbours to compare to: ")) except Exception as e: k = 3 if k<1 or k>9: k = 3 num_classes = len(np.unique(train_y)) if k<=num_classes: k = num_classes+1 if not k%2: k+=1 if __name__ == '__main__': main()
Add the initial skeleton of the classifier
Add the initial skeleton of the classifier
Python
mit
Anirudh-Swaminathan/kNN_py
Add the initial skeleton of the classifier
""" The program to classify the randomly generated data using the k-Nearest Neighbours algorithm """ import sys import numpy as np import math def loadData(fname='train.txt'): """ Function to load the randomly generated data Keyword arguments: fname -- The file name to load the data in to the numpy array from (default "train.txt") """ try: data = np.loadtxt(fname, delimiter=',') except Exception as e: sys.exit("Could not open file!!") np.random.shuffle(data) num_cols = data.shape[1] y = data[:, num_cols-1] X = np.ones(shape=(len(y),num_cols-1)) for i in range(num_cols-1): X[:,i] = data[:,i] X = X.astype(float) return X, y def euclidean_distance(x1, x2): """ Function to calculate the Euclidean distance between 2 data points Keyword arguments: x1 -- the first data point x2 -- the second data point """ distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def main(): train_name = raw_input("Please enter the name of the file containing the \ training data\n") test_name = raw_input("Please enter the name of the file containing the \ testing data\n") train_X, train_y = loadData(fname=train_name) test_X, test_y = loadData(fname=test_name) try: k = int(raw_input("Please enter k, the number of neighbours to compare to: ")) except Exception as e: k = 3 if k<1 or k>9: k = 3 num_classes = len(np.unique(train_y)) if k<=num_classes: k = num_classes+1 if not k%2: k+=1 if __name__ == '__main__': main()
<commit_before><commit_msg>Add the initial skeleton of the classifier<commit_after>
""" The program to classify the randomly generated data using the k-Nearest Neighbours algorithm """ import sys import numpy as np import math def loadData(fname='train.txt'): """ Function to load the randomly generated data Keyword arguments: fname -- The file name to load the data in to the numpy array from (default "train.txt") """ try: data = np.loadtxt(fname, delimiter=',') except Exception as e: sys.exit("Could not open file!!") np.random.shuffle(data) num_cols = data.shape[1] y = data[:, num_cols-1] X = np.ones(shape=(len(y),num_cols-1)) for i in range(num_cols-1): X[:,i] = data[:,i] X = X.astype(float) return X, y def euclidean_distance(x1, x2): """ Function to calculate the Euclidean distance between 2 data points Keyword arguments: x1 -- the first data point x2 -- the second data point """ distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def main(): train_name = raw_input("Please enter the name of the file containing the \ training data\n") test_name = raw_input("Please enter the name of the file containing the \ testing data\n") train_X, train_y = loadData(fname=train_name) test_X, test_y = loadData(fname=test_name) try: k = int(raw_input("Please enter k, the number of neighbours to compare to: ")) except Exception as e: k = 3 if k<1 or k>9: k = 3 num_classes = len(np.unique(train_y)) if k<=num_classes: k = num_classes+1 if not k%2: k+=1 if __name__ == '__main__': main()
Add the initial skeleton of the classifier""" The program to classify the randomly generated data using the k-Nearest Neighbours algorithm """ import sys import numpy as np import math def loadData(fname='train.txt'): """ Function to load the randomly generated data Keyword arguments: fname -- The file name to load the data in to the numpy array from (default "train.txt") """ try: data = np.loadtxt(fname, delimiter=',') except Exception as e: sys.exit("Could not open file!!") np.random.shuffle(data) num_cols = data.shape[1] y = data[:, num_cols-1] X = np.ones(shape=(len(y),num_cols-1)) for i in range(num_cols-1): X[:,i] = data[:,i] X = X.astype(float) return X, y def euclidean_distance(x1, x2): """ Function to calculate the Euclidean distance between 2 data points Keyword arguments: x1 -- the first data point x2 -- the second data point """ distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def main(): train_name = raw_input("Please enter the name of the file containing the \ training data\n") test_name = raw_input("Please enter the name of the file containing the \ testing data\n") train_X, train_y = loadData(fname=train_name) test_X, test_y = loadData(fname=test_name) try: k = int(raw_input("Please enter k, the number of neighbours to compare to: ")) except Exception as e: k = 3 if k<1 or k>9: k = 3 num_classes = len(np.unique(train_y)) if k<=num_classes: k = num_classes+1 if not k%2: k+=1 if __name__ == '__main__': main()
<commit_before><commit_msg>Add the initial skeleton of the classifier<commit_after>""" The program to classify the randomly generated data using the k-Nearest Neighbours algorithm """ import sys import numpy as np import math def loadData(fname='train.txt'): """ Function to load the randomly generated data Keyword arguments: fname -- The file name to load the data in to the numpy array from (default "train.txt") """ try: data = np.loadtxt(fname, delimiter=',') except Exception as e: sys.exit("Could not open file!!") np.random.shuffle(data) num_cols = data.shape[1] y = data[:, num_cols-1] X = np.ones(shape=(len(y),num_cols-1)) for i in range(num_cols-1): X[:,i] = data[:,i] X = X.astype(float) return X, y def euclidean_distance(x1, x2): """ Function to calculate the Euclidean distance between 2 data points Keyword arguments: x1 -- the first data point x2 -- the second data point """ distance = 0 for i in range(len(x1)): distance += pow((x1[i] - x2[i]), 2) return math.sqrt(distance) def main(): train_name = raw_input("Please enter the name of the file containing the \ training data\n") test_name = raw_input("Please enter the name of the file containing the \ testing data\n") train_X, train_y = loadData(fname=train_name) test_X, test_y = loadData(fname=test_name) try: k = int(raw_input("Please enter k, the number of neighbours to compare to: ")) except Exception as e: k = 3 if k<1 or k>9: k = 3 num_classes = len(np.unique(train_y)) if k<=num_classes: k = num_classes+1 if not k%2: k+=1 if __name__ == '__main__': main()
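The skeleton stops after validating k and never classifies anything. One way the missing prediction step could be written on top of the euclidean_distance helper above, as a hypothetical completion rather than part of the commit:

from collections import Counter

def predict(train_X, train_y, test_point, k):
    # rank every training point by its distance to the query point
    neighbours = sorted(
        (euclidean_distance(train_point, test_point), label)
        for train_point, label in zip(train_X, train_y))
    # majority vote among the labels of the k nearest neighbours
    votes = Counter(label for _, label in neighbours[:k])
    return votes.most_common(1)[0][0]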
e3728beb16dfb72a7be71cdf8ea53adb730827ad
examples/share-amis-boto3.py
examples/share-amis-boto3.py
import os import boto3 import botocore from boto3.core.session import Session os.environ['AWS_CONFIG_FILE'] = "/etc/profiles.cfg" orig_owner = "" shared_owner = "" region = "us-west-2" session = botocore.session.get_session() session.profile = 'beta' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) owner_id = response_data['Images'][0]['OwnerId'] if owner_id != orig_owner: exit() ami_id = response_data['Images'][0]['ImageId'] tags = response_data['Images'][0]['Tags'] response_data = ec2_conn.describe_image_attribute( image_id=ami_id, attribute='launchPermission') import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) permission = {"Add": [{"UserId": shared_owner}]} response_data = ec2_conn.modify_image_attribute( image_id=ami_id, launch_permission=permission) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) print '-' * 10, 'switched' session = botocore.session.get_session() session.profile = 'qa' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.create_tags(resources=[ami_id], tags=tags) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': '))
Update the example via boto3
Update the example via boto3
Python
apache-2.0
henrysher/kamboo,henrysher/kamboo
Update the example via boto3
import os import boto3 import botocore from boto3.core.session import Session os.environ['AWS_CONFIG_FILE'] = "/etc/profiles.cfg" orig_owner = "" shared_owner = "" region = "us-west-2" session = botocore.session.get_session() session.profile = 'beta' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) owner_id = response_data['Images'][0]['OwnerId'] if owner_id != orig_owner: exit() ami_id = response_data['Images'][0]['ImageId'] tags = response_data['Images'][0]['Tags'] response_data = ec2_conn.describe_image_attribute( image_id=ami_id, attribute='launchPermission') import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) permission = {"Add": [{"UserId": shared_owner}]} response_data = ec2_conn.modify_image_attribute( image_id=ami_id, launch_permission=permission) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) print '-' * 10, 'switched' session = botocore.session.get_session() session.profile = 'qa' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.create_tags(resources=[ami_id], tags=tags) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': '))
<commit_before><commit_msg>Update the example via boto3<commit_after>
import os import boto3 import botocore from boto3.core.session import Session os.environ['AWS_CONFIG_FILE'] = "/etc/profiles.cfg" orig_owner = "" shared_owner = "" region = "us-west-2" session = botocore.session.get_session() session.profile = 'beta' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) owner_id = response_data['Images'][0]['OwnerId'] if owner_id != orig_owner: exit() ami_id = response_data['Images'][0]['ImageId'] tags = response_data['Images'][0]['Tags'] response_data = ec2_conn.describe_image_attribute( image_id=ami_id, attribute='launchPermission') import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) permission = {"Add": [{"UserId": shared_owner}]} response_data = ec2_conn.modify_image_attribute( image_id=ami_id, launch_permission=permission) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) print '-' * 10, 'switched' session = botocore.session.get_session() session.profile = 'qa' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.create_tags(resources=[ami_id], tags=tags) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': '))
Update the example via boto3import os import boto3 import botocore from boto3.core.session import Session os.environ['AWS_CONFIG_FILE'] = "/etc/profiles.cfg" orig_owner = "" shared_owner = "" region = "us-west-2" session = botocore.session.get_session() session.profile = 'beta' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) owner_id = response_data['Images'][0]['OwnerId'] if owner_id != orig_owner: exit() ami_id = response_data['Images'][0]['ImageId'] tags = response_data['Images'][0]['Tags'] response_data = ec2_conn.describe_image_attribute( image_id=ami_id, attribute='launchPermission') import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) permission = {"Add": [{"UserId": shared_owner}]} response_data = ec2_conn.modify_image_attribute( image_id=ami_id, launch_permission=permission) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) print '-' * 10, 'switched' session = botocore.session.get_session() session.profile = 'qa' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.create_tags(resources=[ami_id], tags=tags) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': '))
<commit_before><commit_msg>Update the example via boto3<commit_after>import os import boto3 import botocore from boto3.core.session import Session os.environ['AWS_CONFIG_FILE'] = "/etc/profiles.cfg" orig_owner = "" shared_owner = "" region = "us-west-2" session = botocore.session.get_session() session.profile = 'beta' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) owner_id = response_data['Images'][0]['OwnerId'] if owner_id != orig_owner: exit() ami_id = response_data['Images'][0]['ImageId'] tags = response_data['Images'][0]['Tags'] response_data = ec2_conn.describe_image_attribute( image_id=ami_id, attribute='launchPermission') import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) permission = {"Add": [{"UserId": shared_owner}]} response_data = ec2_conn.modify_image_attribute( image_id=ami_id, launch_permission=permission) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) print '-' * 10, 'switched' session = botocore.session.get_session() session.profile = 'qa' session = Session(session=session) ec2_conn = session.connect_to('ec2', region_name=region) response_data = ec2_conn.create_tags(resources=[ami_id], tags=tags) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': ')) response_data = ec2_conn.describe_images( filters=[{'Name': "tag:Role", "Values": ["ScannerDy"]}, {'Name': "tag:Version", "Values": ["1051"]}]) import json print json.dumps(response_data, sort_keys=True, indent=4, separators=(',', ': '))
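The script above targets the pre-release boto3 of its era; boto3.core.session and connect_to() predate the stable 1.0 API. Against current boto3, the core sharing step reduces to roughly the following, where the profile name comes from the script and the AMI and account ids are placeholders:

import boto3

def share_ami(ami_id, account_id, profile='beta', region='us-west-2'):
    session = boto3.session.Session(profile_name=profile, region_name=region)
    ec2 = session.client('ec2')
    # grant the target account permission to launch the image
    ec2.modify_image_attribute(
        ImageId=ami_id,
        LaunchPermission={'Add': [{'UserId': account_id}]})

# share_ami('ami-00000000', '123456789012')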
8b7941e488bc902f7fa831b8c020b3eb07a154c0
flexget/tests/test_serialization.py
flexget/tests/test_serialization.py
import datetime from flexget import entry from flexget.utils import qualities class TestSerialization: def test_entry_serialization(self): entry1 = entry.Entry({ 'title': 'blah', 'url': 'http://blah', 'listfield': ['a', 'b', 1, 2], 'dictfield': {'a': 1, 'b': 2}, 'intfield': 5, 'floatfield': 5.5, 'datefield': datetime.date(1999, 9, 9), 'datetimefield': datetime.datetime(1999, 9, 9, 9, 9), 'qualityfield': qualities.Quality('720p hdtv'), }) serialized = entry1.dumps() entry2 = entry.Entry.loads(serialized) # Use the underlying dict, so we compare all fields assert entry1.store == entry2.store
Add a test for serialization functionality
Add a test for serialization functionality
Python
mit
ianstalk/Flexget,Flexget/Flexget,malkavi/Flexget,malkavi/Flexget,crawln45/Flexget,Flexget/Flexget,ianstalk/Flexget,Flexget/Flexget,crawln45/Flexget,malkavi/Flexget,crawln45/Flexget,Flexget/Flexget,malkavi/Flexget,ianstalk/Flexget,crawln45/Flexget
Add a test for serialization functionality
import datetime from flexget import entry from flexget.utils import qualities class TestSerialization: def test_entry_serialization(self): entry1 = entry.Entry({ 'title': 'blah', 'url': 'http://blah', 'listfield': ['a', 'b', 1, 2], 'dictfield': {'a': 1, 'b': 2}, 'intfield': 5, 'floatfield': 5.5, 'datefield': datetime.date(1999, 9, 9), 'datetimefield': datetime.datetime(1999, 9, 9, 9, 9), 'qualityfield': qualities.Quality('720p hdtv'), }) serialized = entry1.dumps() entry2 = entry.Entry.loads(serialized) # Use the underlying dict, so we compare all fields assert entry1.store == entry2.store
<commit_before><commit_msg>Add a test for serialization functionality<commit_after>
import datetime from flexget import entry from flexget.utils import qualities class TestSerialization: def test_entry_serialization(self): entry1 = entry.Entry({ 'title': 'blah', 'url': 'http://blah', 'listfield': ['a', 'b', 1, 2], 'dictfield': {'a': 1, 'b': 2}, 'intfield': 5, 'floatfield': 5.5, 'datefield': datetime.date(1999, 9, 9), 'datetimefield': datetime.datetime(1999, 9, 9, 9, 9), 'qualityfield': qualities.Quality('720p hdtv'), }) serialized = entry1.dumps() entry2 = entry.Entry.loads(serialized) # Use the underlying dict, so we compare all fields assert entry1.store == entry2.store
Add a test for serialization functionalityimport datetime from flexget import entry from flexget.utils import qualities class TestSerialization: def test_entry_serialization(self): entry1 = entry.Entry({ 'title': 'blah', 'url': 'http://blah', 'listfield': ['a', 'b', 1, 2], 'dictfield': {'a': 1, 'b': 2}, 'intfield': 5, 'floatfield': 5.5, 'datefield': datetime.date(1999, 9, 9), 'datetimefield': datetime.datetime(1999, 9, 9, 9, 9), 'qualityfield': qualities.Quality('720p hdtv'), }) serialized = entry1.dumps() entry2 = entry.Entry.loads(serialized) # Use the underlying dict, so we compare all fields assert entry1.store == entry2.store
<commit_before><commit_msg>Add a test for serialization functionality<commit_after>import datetime from flexget import entry from flexget.utils import qualities class TestSerialization: def test_entry_serialization(self): entry1 = entry.Entry({ 'title': 'blah', 'url': 'http://blah', 'listfield': ['a', 'b', 1, 2], 'dictfield': {'a': 1, 'b': 2}, 'intfield': 5, 'floatfield': 5.5, 'datefield': datetime.date(1999, 9, 9), 'datetimefield': datetime.datetime(1999, 9, 9, 9, 9), 'qualityfield': qualities.Quality('720p hdtv'), }) serialized = entry1.dumps() entry2 = entry.Entry.loads(serialized) # Use the underlying dict, so we compare all fields assert entry1.store == entry2.store
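The test above packs every field type into one entry. The same round-trip contract can also be checked field by field with a parametrized test; this sketch reuses only the Entry API visible above (dumps, loads, store) and is not part of the commit:

import pytest

from flexget import entry

@pytest.mark.parametrize('value', [
    5,
    5.5,
    'blah',
    ['a', 'b', 1, 2],
    {'a': 1, 'b': 2},
])
def test_field_roundtrip(value):
    entry1 = entry.Entry({'title': 'blah', 'url': 'http://blah',
                          'field': value})
    # serializing and deserializing must preserve the backing dict
    entry2 = entry.Entry.loads(entry1.dumps())
    assert entry1.store == entry2.store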
0d210930c48cf490435e2ff310955650e8f606aa
db/player_game.py
db/player_game.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from .common import Base from .player import Player class PlayerGame(Base): __tablename__ = 'player_games' __autoload__ = True STANDARD_ATTRS = [ "position", "no", "goals", "assists", "primary_assists", "secondary_assists", "points", "plus_minus", "penalties", "pim", "toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts", "shots_on_goal", "shots_blocked", "shots_missed", "hits", "giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost", "on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed" ] def __init__(self, plr_game_id, game_id, team_id, plr_id, data_dict): self.player_game_id = plr_game_id self.game_id = game_id self.team_id = team_id self.player_id = plr_id for attr in self.STANDARD_ATTRS: if attr in data_dict: setattr(self, attr, data_dict[attr]) def __str__(self): if not hasattr(self, 'player') or self.player is None: self.player = Player.find_by_id(self.player_id) return "%-40s %d G %d A %d Pts./%d PIM" % ( self.player, self.goals, self.assists, self.points, self.pim)
Introduce initial version of player game item
Introduce initial version of player game item
Python
mit
leaffan/pynhldb
Introduce initial version of player game item
#!/usr/bin/env python # -*- coding: utf-8 -*- from .common import Base from .player import Player class PlayerGame(Base): __tablename__ = 'player_games' __autoload__ = True STANDARD_ATTRS = [ "position", "no", "goals", "assists", "primary_assists", "secondary_assists", "points", "plus_minus", "penalties", "pim", "toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts", "shots_on_goal", "shots_blocked", "shots_missed", "hits", "giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost", "on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed" ] def __init__(self, plr_game_id, game_id, team_id, plr_id, data_dict): self.player_game_id = plr_game_id self.game_id = game_id self.team_id = team_id self.player_id = plr_id for attr in self.STANDARD_ATTRS: if attr in data_dict: setattr(self, attr, data_dict[attr]) def __str__(self): if not hasattr(self, 'player') or self.player is None: self.player = Player.find_by_id(self.player_id) return "%-40s %d G %d A %d Pts./%d PIM" % ( self.player, self.goals, self.assists, self.points, self.pim)
<commit_before><commit_msg>Introduce initial version of player game item<commit_after>
#!/usr/bin/env python # -*- coding: utf-8 -*- from .common import Base from .player import Player class PlayerGame(Base): __tablename__ = 'player_games' __autoload__ = True STANDARD_ATTRS = [ "position", "no", "goals", "assists", "primary_assists", "secondary_assists", "points", "plus_minus", "penalties", "pim", "toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts", "shots_on_goal", "shots_blocked", "shots_missed", "hits", "giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost", "on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed" ] def __init__(self, plr_game_id, game_id, team_id, plr_id, data_dict): self.player_game_id = plr_game_id self.game_id = game_id self.team_id = team_id self.player_id = plr_id for attr in self.STANDARD_ATTRS: if attr in data_dict: setattr(self, attr, data_dict[attr]) def __str__(self): if not hasattr(self, 'player') or self.player is None: self.player = Player.find_by_id(self.player_id) return "%-40s %d G %d A %d Pts./%d PIM" % ( self.player, self.goals, self.assists, self.points, self.pim)
Introduce initial version of player game item#!/usr/bin/env python # -*- coding: utf-8 -*- from .common import Base from .player import Player class PlayerGame(Base): __tablename__ = 'player_games' __autoload__ = True STANDARD_ATTRS = [ "position", "no", "goals", "assists", "primary_assists", "secondary_assists", "points", "plus_minus", "penalties", "pim", "toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts", "shots_on_goal", "shots_blocked", "shots_missed", "hits", "giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost", "on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed" ] def __init__(self, plr_game_id, game_id, team_id, plr_id, data_dict): self.player_game_id = plr_game_id self.game_id = game_id self.team_id = team_id self.player_id = plr_id for attr in self.STANDARD_ATTRS: if attr in data_dict: setattr(self, attr, data_dict[attr]) def __str__(self): if not hasattr(self, 'player') or self.player is None: self.player = Player.find_by_id(self.player_id) return "%-40s %d G %d A %d Pts./%d PIM" % ( self.player, self.goals, self.assists, self.points, self.pim)
<commit_before><commit_msg>Introduce initial version of player game item<commit_after>#!/usr/bin/env python # -*- coding: utf-8 -*- from .common import Base from .player import Player class PlayerGame(Base): __tablename__ = 'player_games' __autoload__ = True STANDARD_ATTRS = [ "position", "no", "goals", "assists", "primary_assists", "secondary_assists", "points", "plus_minus", "penalties", "pim", "toi_overall", "toi_pp", "toi_sh", "toi_ev", "avg_shift", "no_shifts", "shots_on_goal", "shots_blocked", "shots_missed", "hits", "giveaways", "takeaways", "blocks", "faceoffs_won", "faceoffs_lost", "on_ice_shots_on_goal", "on_ice_shots_blocked", "on_ice_shots_missed" ] def __init__(self, plr_game_id, game_id, team_id, plr_id, data_dict): self.player_game_id = plr_game_id self.game_id = game_id self.team_id = team_id self.player_id = plr_id for attr in self.STANDARD_ATTRS: if attr in data_dict: setattr(self, attr, data_dict[attr]) def __str__(self): if not hasattr(self, 'player') or self.player is None: self.player = Player.find_by_id(self.player_id) return "%-40s %d G %d A %d Pts./%d PIM" % ( self.player, self.goals, self.assists, self.points, self.pim)
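A short sketch of how the model is meant to be fed. The ids and the stat line are made-up values; any STANDARD_ATTRS key missing from the dict is simply left unset by the setattr loop, and unknown keys are ignored:

from db.player_game import PlayerGame

stats = {
    'position': 'C', 'no': 97,
    'goals': 2, 'assists': 1, 'points': 3,
    'plus_minus': 1, 'penalties': 0, 'pim': 0,
}
pg = PlayerGame(
    plr_game_id=1, game_id=2017020001, team_id=16,
    plr_id=8478402, data_dict=stats)
# keys listed in STANDARD_ATTRS but absent from stats stay unset;
# keys outside STANDARD_ATTRS would be ignored entirely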
b7b30695022314a28a8d14878ce13566b8dd9522
code2html/tests/unit/test_util.py
code2html/tests/unit/test_util.py
# -*- coding: utf-8 -*- import unittest from code2html.util import get_subdir_name, included class GetSubdirNameTest(unittest.TestCase): def test_root_starts_with_dot(self): root = '.' dir_name = './first/second' self.assertEqual(get_subdir_name(root, dir_name), 'first/second') def test_root_starts_with_wave(self): root = '~' dir_name = '~/first/second/third' self.assertEqual(get_subdir_name(root, dir_name), 'first/second/third') class IncludedTest(unittest.TestCase): def test_with_star_wildcard_matching(self): f = 'source_code.py' includes = ['*.py'] self.assertTrue(included(f, includes)) def test_with_question_mark_wildcard_matching(self): f1 = 'source_code.py' f2 = 's.py' includes = ['?.py'] t1 = included(f1, includes) t2 = included(f2, includes) self.assertEqual((t1, t2), (False, True))
Add unit tests for utils
Add unit tests for utils
Python
mit
kfei/code2html
Add unit tests for utils
# -*- coding: utf-8 -*- import unittest from code2html.util import get_subdir_name, included class GetSubdirNameTest(unittest.TestCase): def test_root_starts_with_dot(self): root = '.' dir_name = './first/second' self.assertEqual(get_subdir_name(root, dir_name), 'first/second') def test_root_starts_with_wave(self): root = '~' dir_name = '~/first/second/third' self.assertEqual(get_subdir_name(root, dir_name), 'first/second/third') class IncludedTest(unittest.TestCase): def test_with_star_wildcard_matching(self): f = 'source_code.py' includes = ['*.py'] self.assertTrue(included(f, includes)) def test_with_question_mark_wildcard_matching(self): f1 = 'source_code.py' f2 = 's.py' includes = ['?.py'] t1 = included(f1, includes) t2 = included(f2, includes) self.assertEqual((t1, t2), (False, True))
<commit_before><commit_msg>Add unit tests for utils<commit_after>
# -*- coding: utf-8 -*- import unittest from code2html.util import get_subdir_name, included class GetSubdirNameTest(unittest.TestCase): def test_root_starts_with_dot(self): root = '.' dir_name = './first/second' self.assertEqual(get_subdir_name(root, dir_name), 'first/second') def test_root_starts_with_wave(self): root = '~' dir_name = '~/first/second/third' self.assertEqual(get_subdir_name(root, dir_name), 'first/second/third') class IncludedTest(unittest.TestCase): def test_with_star_wildcard_matching(self): f = 'source_code.py' includes = ['*.py'] self.assertTrue(included(f, includes)) def test_with_question_mark_wildcard_matching(self): f1 = 'source_code.py' f2 = 's.py' includes = ['?.py'] t1 = included(f1, includes) t2 = included(f2, includes) self.assertEqual((t1, t2), (False, True))
Add unit tests for utils# -*- coding: utf-8 -*- import unittest from code2html.util import get_subdir_name, included class GetSubdirNameTest(unittest.TestCase): def test_root_starts_with_dot(self): root = '.' dir_name = './first/second' self.assertEqual(get_subdir_name(root, dir_name), 'first/second') def test_root_starts_with_wave(self): root = '~' dir_name = '~/first/second/third' self.assertEqual(get_subdir_name(root, dir_name), 'first/second/third') class IncludedTest(unittest.TestCase): def test_with_star_wildcard_matching(self): f = 'source_code.py' includes = ['*.py'] self.assertTrue(included(f, includes)) def test_with_question_mark_wildcard_matching(self): f1 = 'source_code.py' f2 = 's.py' includes = ['?.py'] t1 = included(f1, includes) t2 = included(f2, includes) self.assertEqual((t1, t2), (False, True))
<commit_before><commit_msg>Add unit tests for utils<commit_after># -*- coding: utf-8 -*- import unittest from code2html.util import get_subdir_name, included class GetSubdirNameTest(unittest.TestCase): def test_root_starts_with_dot(self): root = '.' dir_name = './first/second' self.assertEqual(get_subdir_name(root, dir_name), 'first/second') def test_root_starts_with_wave(self): root = '~' dir_name = '~/first/second/third' self.assertEqual(get_subdir_name(root, dir_name), 'first/second/third') class IncludedTest(unittest.TestCase): def test_with_star_wildcard_matching(self): f = 'source_code.py' includes = ['*.py'] self.assertTrue(included(f, includes)) def test_with_question_mark_wildcard_matching(self): f1 = 'source_code.py' f2 = 's.py' includes = ['?.py'] t1 = included(f1, includes) t2 = included(f2, includes) self.assertEqual((t1, t2), (False, True))
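Only the tests are in this commit; the functions under test live elsewhere in the package. One pair of implementations that would satisfy the assertions above, as a plausible reconstruction rather than the project's actual code:

import fnmatch
import os.path

def get_subdir_name(root, dir_name):
    # path of dir_name relative to root, e.g. ('.', './first/second')
    # becomes 'first/second'
    return os.path.relpath(dir_name, root)

def included(filename, includes):
    # shell-style wildcard match: '*' spans any run, '?' a single character
    return any(fnmatch.fnmatch(filename, pattern) for pattern in includes)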
2baaa4d754838cfc8196c678f86a8419ca139c12
test/single_test.py
test/single_test.py
import sys import unittest from unittest import TestSuite def suite(test_name): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromName(test_name)) return suite if __name__ == "__main__": if len(sys.argv) < 2: print("Usage {test_name}") sys.exit(1) runner = unittest.TextTestRunner() runner.run(suite(sys.argv[1]))
Add utility that allows debugging of single test
Add utility that allows debugging of single test
Python
mit
JakubPetriska/poker-cfr,JakubPetriska/poker-cfr
Add utility that allows debugging of single test
import sys import unittest from unittest import TestSuite def suite(test_name): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromName(test_name)) return suite if __name__ == "__main__": if len(sys.argv) < 2: print("Usage {test_name}") sys.exit(1) runner = unittest.TextTestRunner() runner.run(suite(sys.argv[1]))
<commit_before><commit_msg>Add utility that allows debugging of single test<commit_after>
import sys import unittest from unittest import TestSuite def suite(test_name): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromName(test_name)) return suite if __name__ == "__main__": if len(sys.argv) < 2: print("Usage {test_name}") sys.exit(1) runner = unittest.TextTestRunner() runner.run(suite(sys.argv[1]))
Add utility that allows debugging of single testimport sys import unittest from unittest import TestSuite def suite(test_name): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromName(test_name)) return suite if __name__ == "__main__": if len(sys.argv) < 2: print("Usage {test_name}") sys.exit(1) runner = unittest.TextTestRunner() runner.run(suite(sys.argv[1]))
<commit_before><commit_msg>Add utility that allows debugging of single test<commit_after>import sys import unittest from unittest import TestSuite def suite(test_name): suite = unittest.TestSuite() suite.addTest(unittest.defaultTestLoader.loadTestsFromName(test_name)) return suite if __name__ == "__main__": if len(sys.argv) < 2: print("Usage {test_name}") sys.exit(1) runner = unittest.TextTestRunner() runner.run(suite(sys.argv[1]))
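loadTestsFromName resolves an ordinary dotted path, so the script is pointed at a module, a TestCase class or a single method. The test name below is illustrative:

import unittest

# equivalent to: python single_test.py tests.test_game.TestGame.test_fold
suite = unittest.defaultTestLoader.loadTestsFromName(
    'tests.test_game.TestGame.test_fold')
unittest.TextTestRunner().run(suite)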
33be10064a7f2eb80b826e0d26fa9856f1c5f231
tests/test_model.py
tests/test_model.py
import unittest from gb import db,app from gb.models import (Wig, WigValue, BasePair, Bed, Annotation, Fasta, User, Track, View, ViewTrack) import logging logging.basicConfig() LOG = logging.getLogger(__name__) class TestSetUp(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_set_up(self): self.assertFalse(db.session.query(User).all()) class TestWig(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_create_empty_variable_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_empty_fixed_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_wig_with_values(self): wig = Wig('variable') wig.values = [WigValue(i,x,None) for i,x in enumerate(range(0,1000,10))] db.session.add(wig) db.session.commit() self.assertEqual(len(range(0,1000,10)),len(db.session.query(Wig).first().values))
Add beginnings of tests for model
Add beginnings of tests for model
Python
mit
mbiokyle29/geno-browser,mbiokyle29/geno-browser,mbiokyle29/geno-browser
Add beginnings of tests for model
import unittest from gb import db,app from gb.models import (Wig, WigValue, BasePair, Bed, Annotation, Fasta, User, Track, View, ViewTrack) import logging logging.basicConfig() LOG = logging.getLogger(__name__) class TestSetUp(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_set_up(self): self.assertFalse(db.session.query(User).all()) class TestWig(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_create_empty_variable_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_empty_fixed_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_wig_with_values(self): wig = Wig('variable') wig.values = [WigValue(i,x,None) for i,x in enumerate(range(0,1000,10))] db.session.add(wig) db.session.commit() self.assertEqual(len(range(0,1000,10)),len(db.session.query(Wig).first().values))
<commit_before><commit_msg>Add beginnings of tests for model<commit_after>
import unittest from gb import db,app from gb.models import (Wig, WigValue, BasePair, Bed, Annotation, Fasta, User, Track, View, ViewTrack) import logging logging.basicConfig() LOG = logging.getLogger(__name__) class TestSetUp(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_set_up(self): self.assertFalse(db.session.query(User).all()) class TestWig(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_create_empty_variable_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_empty_fixed_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_wig_with_values(self): wig = Wig('variable') wig.values = [WigValue(i,x,None) for i,x in enumerate(range(0,1000,10))] db.session.add(wig) db.session.commit() self.assertEqual(len(range(0,1000,10)),len(db.session.query(Wig).first().values))
Add beginnings of tests for modelimport unittest from gb import db,app from gb.models import (Wig, WigValue, BasePair, Bed, Annotation, Fasta, User, Track, View, ViewTrack) import logging logging.basicConfig() LOG = logging.getLogger(__name__) class TestSetUp(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_set_up(self): self.assertFalse(db.session.query(User).all()) class TestWig(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_create_empty_variable_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_empty_fixed_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_wig_with_values(self): wig = Wig('variable') wig.values = [WigValue(i,x,None) for i,x in enumerate(range(0,1000,10))] db.session.add(wig) db.session.commit() self.assertEqual(len(range(0,1000,10)),len(db.session.query(Wig).first().values))
<commit_before><commit_msg>Add beginnings of tests for model<commit_after>import unittest from gb import db,app from gb.models import (Wig, WigValue, BasePair, Bed, Annotation, Fasta, User, Track, View, ViewTrack) import logging logging.basicConfig() LOG = logging.getLogger(__name__) class TestSetUp(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_set_up(self): self.assertFalse(db.session.query(User).all()) class TestWig(unittest.TestCase): def setUp(self): app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://' db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_create_empty_variable_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_empty_fixed_wig(self): wig = Wig('variable') db.session.add(wig) db.session.commit() def test_create_wig_with_values(self): wig = Wig('variable') wig.values = [WigValue(i,x,None) for i,x in enumerate(range(0,1000,10))] db.session.add(wig) db.session.commit() self.assertEqual(len(range(0,1000,10)),len(db.session.query(Wig).first().values))
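The 'sqlite://' URI with no path is SQLAlchemy's in-memory database, which is what lets each test create and drop a fresh schema. The same engine can be built directly:

from sqlalchemy import create_engine

# no file path after the scheme means the database lives in memory
# and disappears when the engine is disposed
engine = create_engine('sqlite://')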
5303ad1f603d2d357d20f270f6342edf7b05cb9d
pylp/utils/paths.py
pylp/utils/paths.py
""" Some useful functions for using paths. Copyright (C) 2017 The Pylp Authors. This file is under the MIT License. """ import os.path # Make a path more "readable" def make_readable_path(path): home = os.path.expanduser("~") if path.startswith(home): path = "~" + path[len(home):] return path
Make pylpfile path more readable
Make pylpfile path more readable
Python
mit
pylp/pylp
Make pylpfile path more readable
""" Some useful functions for using paths. Copyright (C) 2017 The Pylp Authors. This file is under the MIT License. """ import os.path # Make a path more "readable" def make_readable_path(path): home = os.path.expanduser("~") if path.startswith(home): path = "~" + path[len(home):] return path
<commit_before><commit_msg>Make pylpfile path more readable<commit_after>
""" Some useful functions for using paths. Copyright (C) 2017 The Pylp Authors. This file is under the MIT License. """ import os.path # Make a path more "readable" def make_readable_path(path): home = os.path.expanduser("~") if path.startswith(home): path = "~" + path[len(home):] return path
Make pylpfile path more readable""" Some useful functions for using paths. Copyright (C) 2017 The Pylp Authors. This file is under the MIT License. """ import os.path # Make a path more "readable" def make_readable_path(path): home = os.path.expanduser("~") if path.startswith(home): path = "~" + path[len(home):] return path
<commit_before><commit_msg>Make pylpfile path more readable<commit_after>""" Some useful functions for using paths. Copyright (C) 2017 The Pylp Authors. This file is under the MIT License. """ import os.path # Make a path more "readable" def make_readable_path(path): home = os.path.expanduser("~") if path.startswith(home): path = "~" + path[len(home):] return path
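A short usage sketch for the helper above; the home directory in the comments is illustrative:

from pylp.utils.paths import make_readable_path

# assuming the current user's home directory is /home/alice:
print(make_readable_path('/home/alice/projects/pylp'))  # -> ~/projects/pylp
print(make_readable_path('/tmp/build'))                 # -> /tmp/build (unchanged)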
5781c36567aac9bd41d126d641be1e7cafdac1de
inselect/tests/test_basic.py
inselect/tests/test_basic.py
from inselect.image_viewer import ImageViewer import os import sys from PySide import QtGui import numpy as np from skimage import data_dir def test_main(): app = QtGui.QApplication(sys.argv) window = ImageViewer() window.open(os.path.join(data_dir, 'chelsea.png')) window.segment()
Add basic GUI instantiation test
Add basic GUI instantiation test
Python
bsd-3-clause
NaturalHistoryMuseum/inselect,NaturalHistoryMuseum/inselect
Add basic GUI instantiation test
from inselect.image_viewer import ImageViewer import os import sys from PySide import QtGui import numpy as np from skimage import data_dir def test_main(): app = QtGui.QApplication(sys.argv) window = ImageViewer() window.open(os.path.join(data_dir, 'chelsea.png')) window.segment()
<commit_before><commit_msg>Add basic GUI instantiation test<commit_after>
from inselect.image_viewer import ImageViewer import os import sys from PySide import QtGui import numpy as np from skimage import data_dir def test_main(): app = QtGui.QApplication(sys.argv) window = ImageViewer() window.open(os.path.join(data_dir, 'chelsea.png')) window.segment()
Add basic GUI instantiation testfrom inselect.image_viewer import ImageViewer import os import sys from PySide import QtGui import numpy as np from skimage import data_dir def test_main(): app = QtGui.QApplication(sys.argv) window = ImageViewer() window.open(os.path.join(data_dir, 'chelsea.png')) window.segment()
<commit_before><commit_msg>Add basic GUI instantiation test<commit_after>from inselect.image_viewer import ImageViewer import os import sys from PySide import QtGui import numpy as np from skimage import data_dir def test_main(): app = QtGui.QApplication(sys.argv) window = ImageViewer() window.open(os.path.join(data_dir, 'chelsea.png')) window.segment()
1019631b883c00550b6d3344de3649d7e9a98ef1
tests/test_irc_formatter.py
tests/test_irc_formatter.py
# -*- coding: utf-8 -*- """ Pygments IRC formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import io import os import re import unittest import tempfile from os.path import join, dirname, isfile from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import IRCFormatter import support tokensource = list(PythonLexer().get_tokens("lambda x: 123")) class IRCFormatterTest(unittest.TestCase): def test_correct_output(self): hfmt = IRCFormatter() houtfile = StringIO() hfmt.format(tokensource, houtfile) self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
Add basic test for irc formatter
Add basic test for irc formatter
Python
bsd-2-clause
pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,dscorbett/pygments,dscorbett/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments,pygments/pygments
Add basic test for irc formatter
# -*- coding: utf-8 -*- """ Pygments IRC formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import io import os import re import unittest import tempfile from os.path import join, dirname, isfile from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import IRCFormatter import support tokensource = list(PythonLexer().get_tokens("lambda x: 123")) class IRCFormatterTest(unittest.TestCase): def test_correct_output(self): hfmt = IRCFormatter() houtfile = StringIO() hfmt.format(tokensource, houtfile) self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
<commit_before><commit_msg>Add basic test for irc formatter<commit_after>
# -*- coding: utf-8 -*- """ Pygments IRC formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import io import os import re import unittest import tempfile from os.path import join, dirname, isfile from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import IRCFormatter import support tokensource = list(PythonLexer().get_tokens("lambda x: 123")) class IRCFormatterTest(unittest.TestCase): def test_correct_output(self): hfmt = IRCFormatter() houtfile = StringIO() hfmt.format(tokensource, houtfile) self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
Add basic test for irc formatter# -*- coding: utf-8 -*- """ Pygments IRC formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import io import os import re import unittest import tempfile from os.path import join, dirname, isfile from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import IRCFormatter import support tokensource = list(PythonLexer().get_tokens("lambda x: 123")) class IRCFormatterTest(unittest.TestCase): def test_correct_output(self): hfmt = IRCFormatter() houtfile = StringIO() hfmt.format(tokensource, houtfile) self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
<commit_before><commit_msg>Add basic test for irc formatter<commit_after># -*- coding: utf-8 -*- """ Pygments IRC formatter tests ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ :copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import io import os import re import unittest import tempfile from os.path import join, dirname, isfile from pygments.util import StringIO from pygments.lexers import PythonLexer from pygments.formatters import IRCFormatter import support tokensource = list(PythonLexer().get_tokens("lambda x: 123")) class IRCFormatterTest(unittest.TestCase): def test_correct_output(self): hfmt = IRCFormatter() houtfile = StringIO() hfmt.format(tokensource, houtfile) self.assertEqual(u'\x0302lambda\x03 x: \x0302123\x03\n', houtfile.getvalue())
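The expected string is built from mIRC colour codes: \x03 starts a colour, the next one or two digits pick the palette entry (02 is conventionally blue), and a bare \x03 resets. A small helper for stripping such codes, generic IRC handling rather than anything from Pygments:

import re

IRC_COLOUR = re.compile('\x03(?:[0-9]{1,2}(?:,[0-9]{1,2})?)?')

def strip_irc_codes(text):
    # removes colour escapes, optionally with a background after a comma
    return IRC_COLOUR.sub('', text)

assert strip_irc_codes('\x0302lambda\x03 x: \x0302123\x03\n') == 'lambda x: 123\n'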
1a5f1431886b3b7517db7180fa0f53d71c779901
tests/test_indentxml.py
tests/test_indentxml.py
from unittest import TestCase import sublime class IndentXmlBase(TestCase): src = "" def set_text(self, string): self.view.run_command("insert", {"characters": string}) def get_text(self): return self.view.substr(sublime.Region(0, self.view.size())) def setUp(self): self.view = sublime.active_window().new_file() def tearDown(self): if self.view: self.view.set_scratch(True) self.view.window().focus_view(self.view) self.view.window().run_command("close_file") def indent(self): self.view.run_command("indent_xml") class TestBasicIndentXml(IndentXmlBase): src = "<root><node></node></root>" expected = "<root>\n\t<node/>\n</root>" def test_foo(self): self.set_text(self.src) self.indent() self.assertEqual(self.get_text(), self.expected)
Add unit-tests for the plugin
Add unit-tests for the plugin Unit-tests are using https://github.com/randy3k/UnitTesting
Python
mit
alek-sys/sublimetext_indentxml,alek-sys/sublimetext_indentxml
Add unit-tests for the plugin Unit-tests are using https://github.com/randy3k/UnitTesting
from unittest import TestCase import sublime class IndentXmlBase(TestCase): src = "" def set_text(self, string): self.view.run_command("insert", {"characters": string}) def get_text(self): return self.view.substr(sublime.Region(0, self.view.size())) def setUp(self): self.view = sublime.active_window().new_file() def tearDown(self): if self.view: self.view.set_scratch(True) self.view.window().focus_view(self.view) self.view.window().run_command("close_file") def indent(self): self.view.run_command("indent_xml") class TestBasicIndentXml(IndentXmlBase): src = "<root><node></node></root>" expected = "<root>\n\t<node/>\n</root>" def test_foo(self): self.set_text(self.src) self.indent() self.assertEqual(self.get_text(), self.expected)
<commit_before><commit_msg>Add unit-tests for the plugin Unit-tests are using https://github.com/randy3k/UnitTesting<commit_after>
from unittest import TestCase import sublime class IndentXmlBase(TestCase): src = "" def set_text(self, string): self.view.run_command("insert", {"characters": string}) def get_text(self): return self.view.substr(sublime.Region(0, self.view.size())) def setUp(self): self.view = sublime.active_window().new_file() def tearDown(self): if self.view: self.view.set_scratch(True) self.view.window().focus_view(self.view) self.view.window().run_command("close_file") def indent(self): self.view.run_command("indent_xml") class TestBasicIndentXml(IndentXmlBase): src = "<root><node></node></root>" expected = "<root>\n\t<node/>\n</root>" def test_foo(self): self.set_text(self.src) self.indent() self.assertEqual(self.get_text(), self.expected)
Add unit-tests for the plugin Unit-tests are using https://github.com/randy3k/UnitTestingfrom unittest import TestCase import sublime class IndentXmlBase(TestCase): src = "" def set_text(self, string): self.view.run_command("insert", {"characters": string}) def get_text(self): return self.view.substr(sublime.Region(0, self.view.size())) def setUp(self): self.view = sublime.active_window().new_file() def tearDown(self): if self.view: self.view.set_scratch(True) self.view.window().focus_view(self.view) self.view.window().run_command("close_file") def indent(self): self.view.run_command("indent_xml") class TestBasicIndentXml(IndentXmlBase): src = "<root><node></node></root>" expected = "<root>\n\t<node/>\n</root>" def test_foo(self): self.set_text(self.src) self.indent() self.assertEqual(self.get_text(), self.expected)
<commit_before><commit_msg>Add unit-tests for the plugin Unit-tests are using https://github.com/randy3k/UnitTesting<commit_after>from unittest import TestCase import sublime class IndentXmlBase(TestCase): src = "" def set_text(self, string): self.view.run_command("insert", {"characters": string}) def get_text(self): return self.view.substr(sublime.Region(0, self.view.size())) def setUp(self): self.view = sublime.active_window().new_file() def tearDown(self): if self.view: self.view.set_scratch(True) self.view.window().focus_view(self.view) self.view.window().run_command("close_file") def indent(self): self.view.run_command("indent_xml") class TestBasicIndentXml(IndentXmlBase): src = "<root><node></node></root>" expected = "<root>\n\t<node/>\n</root>" def test_foo(self): self.set_text(self.src) self.indent() self.assertEqual(self.get_text(), self.expected)
4a81ee90a39c4831dca186ae269184c703ddbf2e
src/autobot/src/udpRemote.py
src/autobot/src/udpRemote.py
#!/usr/bin/env python import rospy import socket from autobot.msg import drive_param """ Warning: This code has not been tested at all Protocol could be comma delimited Vaa.aa;Abb.bb TODO - [ ] Unit test - [ ] Define protocol - [ ] Use select() for non-blocking operation - [ ] Use a timeout for setting drive/angle to '0' (safety) """ def parseCommand(cmd, driveParam): """ pass in Paa.aa where P is the ID of the command """ val = 0.0 # first test to see if able to parse the value from substring try: val = float(cmd[1:]) except ValueError: return driveParam # unable to parse, bail if cmd[0] == 'V': driveParam.velocity = val elif cmd[0] == 'A': driveParam.angle = val return driveParam # valid drive parameter parsed def parseMessage(msg, pub): """ Attempts to parse a message for a proper command string If the command string is valid, a drive parameter will be published """ driveParam = drive_param() if ";" in msg: arr = msg.split(";") for cmd in arr: driveParam = parseCommand(cmd, driveParam) pub.publish(driveParam) else: pass def main(): UDP_IP = "127.0.0.1" # loopback UDP_PORT = 11156 rospy.init_node("udpRemote", anonymous=True) pub = rospy.Publisher("drive_parameters", drive_param, queue_size=10) sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) sock.bind((UDP_IP, UDP_PORT)) while True: data, addr = sock.recvfrom(1024) parseMessage(str(data, "utf-8"), pub) if __name__ == "__main__": main()
Add code for udp remote
Add code for udp remote This code is untested and is still WIP.
Python
mit
atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot,atkvo/masters-bot
c5546da1d84c7969f51d186bb84298a4fea8bef6
bindings/python/examples/return_region.py
bindings/python/examples/return_region.py
#!/usr/bin/env python

# Copyright 2019 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import legion
from legion import task, RW


@task
def make_region():
    # If you return a region from a task, the privileges to the region
    # will be automatically given to the calling task.
    R = legion.Region.create([4, 4], {'x': legion.float64})
    print('returning from make_region with', R)
    return R


@task
def make_region_dict():
    # It should also work if the region in question is returned as
    # part of a larger data structure.
    R = legion.Region.create([4, 4], {'x': legion.float64})
    result = {'asdf': R}
    print('returning from make_region_dict with', result)
    return result


@task(privileges=[RW])
def use_region(R):
    print('in use_region with', R)
    R.x.fill(0)


@task
def main():
    R = make_region().get()
    use_region(R)
    print('in main with', R)
    R.x.fill(1)

    R2 = make_region_dict().get()['asdf']
    use_region(R2)
    print('in main with', R2)
    R2.x.fill(1)


if __name__ == '__legion_main__':
    main()
Test returning regions from tasks.
python: Test returning regions from tasks.
Python
apache-2.0
StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion,StanfordLegion/legion
0ca5e2fa612687533c4a60dd1e2463fde6fc0bd6
Utilities/ConvertLanguage.py
Utilities/ConvertLanguage.py
#!/usr/bin/env python

################################################################################
# ConvertLanguage.py - converts an old 3RVX version 2.X language file to the new
# 3.X format. A 'template' file is used to provide the missing strings that
# didn't exist in the old 2.X translations.
#
# Matthew Malensek <matt@malensek.net>
################################################################################


def convert(input, template, output):
    from xml.etree import ElementTree

    convert = ElementTree.parse(input)
    cr = convert.getroot()

    template = ElementTree.parse(template)
    tr = template.getroot()

    strings = set()
    for strtag in tr.findall('string'):
        strings.add(strtag.find('original').text)

    translations = {}
    for wordtag in cr.findall('word'):
        str = wordtag.find('key').text
        if str in strings:
            trans = wordtag.find('translation').text
            translations[str] = trans

    for strtag in tr.findall('string'):
        orig = strtag.find('original')
        trans = strtag.find('translation')
        if orig is None or trans is None:
            continue

        if orig.text in translations:
            print(trans.text + ' -> ' + translations[orig.text])
            trans.text = translations[orig.text]
        else:
            trans.text = orig.text

    template.write(output, encoding='UTF-8')


def print_usage(appname='ConvertLanguage'):
    print('Usage: ' + appname + ' input.xml template.xml output.xml')
    print('File descriptions:')
    print('input.xml     The old (v2) language file to convert')
    print('template.xml  Template file to use for missing translations')
    print('output.xml    Output file name')


if __name__ == "__main__":
    import sys
    args = sys.argv
    if len(args) < 4:
        print_usage(args[0])
    else:
        convert(args[1], args[2], args[3])
Add script to convert old language files
Add script to convert old language files
Python
bsd-2-clause
malensek/3RVX,Soulflare3/3RVX,Soulflare3/3RVX,malensek/3RVX,malensek/3RVX,Soulflare3/3RVX
ff92928b063ff36bb71945b6ff1c75d149411bc7
helloTurtle.py
helloTurtle.py
# drawing out a hello world with the python turtle library!
# extension: different modules for each turtle so letter color, etc. can be easily changed.
# TODO: fix angle errors, draw the rest of the letters

import turtle

wn = turtle.Screen()
h = turtle.Turtle()

# move module to the left side of the screen
h.penup()
h.forward(-240)
h.pendown()

# draw the H
h.left(90)
h.forward(120)
h.penup()
h.forward(-60)
h.right(90)
h.pendown()
i = 0
while (i < 90):
    h.forward(1)
    h.right(2)
    i += 2
h.right(1)
h.forward(30)

# draw the E
h.penup()
h.left(90)
h.forward(15)
h.left(90)
h.forward(30)
h.right(90)
h.pendown()
h.forward(60)
h.left(90)
i = 0
while (i < 315):
    h.forward(1)
    h.left(2)
    i += 2

wn.exitonclick()
Add the .py file to the repo.
Add the .py file to the repo. This file is where the actual program is!
Python
cc0-1.0
slohmes/Hello-Turtle
6886dc54980b5c2c66bac585a36f0d24e7cc9eb0
tests/test_tools.py
tests/test_tools.py
"""Test the functions in the tools file.""" import bibpy.tools def test_version_format(): assert bibpy.tools.version_format().format('0.1.0') == '%(prog)s v0.1.0' program_name = dict(prog='tool_name') assert (bibpy.tools.version_format() % program_name).format('2.3') ==\ 'tool_name v2.3' def test_key_grammar(): pass def test_entry_grammar(): pass def test_field_grammar(): pass def test_numeric_grammar(): pass def test_parse_query(): assert bibpy.tools.parse_query('~Author') == ('entry', ['~', 'Author']) assert bibpy.tools.parse_query('!Author') == ('entry', ['!', 'Author']) def test_predicate_composition(): pass
Add initial tests for bibpy tools
Add initial tests for bibpy tools
Python
mit
MisanthropicBit/bibpy,MisanthropicBit/bibpy
81a77ec5e354448f328fa4d65154cea65709eac0
backend/scripts/fixdrafts.py
backend/scripts/fixdrafts.py
#!/usr/bin/env python

import rethinkdb as r
import optparse

if __name__ == "__main__":
    parser = optparse.OptionParser()
    parser.add_option("-P", "--port", dest="port", type="int",
                      help="rethinkdb port", default=30815)
    (options, args) = parser.parse_args()
    conn = r.connect('localhost', options.port, db='materialscommons')
    drafts = list(r.table('drafts').run(conn, time_format='raw'))
    for draft in drafts:
        if 'process' not in draft:
            continue
        input_conditions = draft['process']['input_conditions']
        if 'Specimen Prep' in input_conditions:
            sp = input_conditions['Specimen Prep']
            for prop in sp['default_properties']:
                if prop['attribute'] == 'preparation':
                    prop['value_choice'] = ["Electropolish", "FIB Liftout", "Other"]
                    prop['unit_choice'] = []
                    prop['value'] = prop['unit']
                    prop['unit'] = ""
            r.table('drafts').get(draft['id']).update(draft).run(conn)
Add script to fix Emmanuelle's messed up drafts.
Add script to fix Emmanuelle's messed up drafts.
Python
mit
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
4890ec7d794918cb642a27778ac48ecdc28e02ab
pyfermod/ferfmrc.py
pyfermod/ferfmrc.py
'''
Represents a forecast-model-run collection in Ferret
'''

from __future__ import print_function

import pyferret

class FerFMRC(pyferret.FerAggDSet):
    '''
    A forecast-model-run collection dataset in Ferret.
    Variables in this dataset (FerFMRCVar objects) have restrictions
    on the time and forecast axes, and have special transformations
    to better view the quality of the forecast represented by the
    variable.
    '''

    def __init__(self, name, dsets, title='', warn=True, hide=False):
        '''
        Creates a forecast-model-run collection dataset in Ferret.
        Variables in the given datasets to be aggregated must have
        time axes (forecasted time) whose values are offset but
        otherwise match where they overlap (a subset of times that
        advances along a single time axis that is the union of all
        dataset time axes).  The datasets can be given in any order;
        they will be arranged to have monotonically increasing time
        subsets.  The aggregated variables (FerFMRCVar objects) have
        special transformations to better view the quality of the
        forecast represented by each variable.
        '''
        super(FerFMRC, self).__init__(name=name, dsets=dsets, along='F',
                                      title=title, warn=warn, hide=hide)
Add initial FerFMRC object - very incomplete
Add initial FerFMRC object - very incomplete git-svn-id: 77c6ae948ca0f9b1480c5330e5e23bcf0afa938f@23147 fdbf22ae-c210-0410-be80-ca943da6b8f8
Python
unlicense
NOAA-PMEL/PyFerret,NOAA-PMEL/PyFerret,NOAA-PMEL/PyFerret,NOAA-PMEL/PyFerret,NOAA-PMEL/PyFerret
35e9088db9c47ab9e59489c23d6975c5747c43b8
studygroups/migrations/0107_auto_20181115_1004.py
studygroups/migrations/0107_auto_20181115_1004.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2018-11-15 10:04
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('studygroups', '0106_studygroup_country_en'),
    ]

    operations = [
        migrations.AlterField(
            model_name='studygroup',
            name='country_en',
            field=models.CharField(blank=True, default='', max_length=256),
            preserve_default=False,
        ),
    ]
Add migration to remove blank=True on country_en field
Add migration to remove blank=True on country_en field
Python
mit
p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles,p2pu/learning-circles
3b3c3c68c52f586db0be44e22fe5ba9400c5e53c
familias/migrations/0008_auto_20170303_2331.py
familias/migrations/0008_auto_20170303_2331.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-03-03 23:31
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('familias', '0007_merge_20170228_1344'),
    ]

    operations = [
        migrations.AlterField(
            model_name='familia',
            name='estado_civil',
            field=models.TextField(choices=[('soltero', 'Soltero'), ('viudo', 'Viudo'), ('union_libre', 'Unión Libre'), ('casado_civil', 'Casado-Civil'), ('casado_iglesia', 'Casado-Iglesia'), ('vuelto_a_casar', 'Divorciado Vuelto a Casar')]),
        ),
        migrations.AlterField(
            model_name='familia',
            name='localidad',
            field=models.TextField(choices=[('poblado_jurica', 'Poblado Juríca'), ('nabo', 'Nabo'), ('salitre', 'Salitre'), ('la_campana', 'La Campana'), ('otro', 'Otro')]),
        ),
        migrations.AlterField(
            model_name='integrante',
            name='nivel_estudios',
            field=models.TextField(choices=[('ninguno', 'Ninguno'), ('1_grado', 'Primero de Primaria'), ('2_grado', 'Segundo de Primaria'), ('3_grado', 'Tercero de Primaria'), ('4_grado', 'Cuarto de Primaria'), ('5_grado', 'Quinto de Primaria'), ('6_grado', 'Sexto de Primaria'), ('7_grado', 'Primero de Secundaria'), ('8_grado', 'Segundo de Secundaria'), ('9_grado', 'Tercero de Secundaria'), ('10_grado', 'Primero de Preparatoria'), ('11_grado', 'Segundo de Preparatoria'), ('12_grado', 'Tercero de Preparatoria'), ('universidad', 'Universidad'), ('maestria', 'Maestría'), ('doctorado', 'Doctorado')]),
        ),
    ]
Create appropriate migrations for changes in models
Create appropriate migrations for changes in models
Python
mit
erikiado/jp2_online,erikiado/jp2_online,erikiado/jp2_online
479275674916e45c0a2b70a372962f3d0c271e4f
SatNOGS/base/management/commands/update_all_tle.py
SatNOGS/base/management/commands/update_all_tle.py
from orbit import satellite

from django.core.management.base import BaseCommand

from base.models import Satellite


class Command(BaseCommand):
    help = 'Update TLE data for all satellites in the database'

    def handle(self, *args, **options):
        satellites = Satellite.objects.all()
        for obj in satellites:
            try:
                sat = satellite(obj.norad_cat_id)
            except Exception:
                self.stdout.write(('Satellite {} with Identifier {} does '
                                   'not exist').format(obj.name, obj.norad_cat_id))
                continue
            obj.name = sat.name()
            tle = sat.tle()
            obj.tle0 = tle[0]
            obj.tle1 = tle[1]
            obj.tle2 = tle[2]
            obj.save()
            self.stdout.write(('Satellite {} with Identifier {} '
                               'found [updated]').format(obj.name, obj.norad_cat_id))
Add management command to update all existing satellite tle data
Add management command to update all existing satellite tle data
Python
agpl-3.0
cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network
Add management command to update all existing satellite tle data
from orbit import satellite from django.core.management.base import BaseCommand from base.utils import update_all_satellites from base.models import Satellite class Command(BaseCommand): help = 'Create initial fixtures' def handle(self, *args, **options): satellites = Satellite.objets.all() for obj in satellites: try: sat = satellite(obj.norad_cat_id) except: self.stdout.write(('Satellite {} with Identifier {} does ' 'not exist').format(obj.name, obj.norad_cat_id)) continue obj.name = sat.name() tle = sat.tle() obj.tle0 = tle[0] obj.tle1 = tle[1] obj.tle2 = tle[2] obj.save() self.stdout.write(('Satellite {} with Identifier {} ' 'found [updated]').format(obj.norad_cat_id, obj.name))
<commit_before><commit_msg>Add management command to update all existing satellite tle data<commit_after>
from orbit import satellite from django.core.management.base import BaseCommand from base.utils import update_all_satellites from base.models import Satellite class Command(BaseCommand): help = 'Create initial fixtures' def handle(self, *args, **options): satellites = Satellite.objets.all() for obj in satellites: try: sat = satellite(obj.norad_cat_id) except: self.stdout.write(('Satellite {} with Identifier {} does ' 'not exist').format(obj.name, obj.norad_cat_id)) continue obj.name = sat.name() tle = sat.tle() obj.tle0 = tle[0] obj.tle1 = tle[1] obj.tle2 = tle[2] obj.save() self.stdout.write(('Satellite {} with Identifier {} ' 'found [updated]').format(obj.norad_cat_id, obj.name))
Add management command to update all existing satellite tle datafrom orbit import satellite from django.core.management.base import BaseCommand from base.utils import update_all_satellites from base.models import Satellite class Command(BaseCommand): help = 'Create initial fixtures' def handle(self, *args, **options): satellites = Satellite.objets.all() for obj in satellites: try: sat = satellite(obj.norad_cat_id) except: self.stdout.write(('Satellite {} with Identifier {} does ' 'not exist').format(obj.name, obj.norad_cat_id)) continue obj.name = sat.name() tle = sat.tle() obj.tle0 = tle[0] obj.tle1 = tle[1] obj.tle2 = tle[2] obj.save() self.stdout.write(('Satellite {} with Identifier {} ' 'found [updated]').format(obj.norad_cat_id, obj.name))
<commit_before><commit_msg>Add management command to update all existing satellite tle data<commit_after>from orbit import satellite

from django.core.management.base import BaseCommand

from base.utils import update_all_satellites
from base.models import Satellite


class Command(BaseCommand):
    help = 'Create initial fixtures'

    def handle(self, *args, **options):
        satellites = Satellite.objects.all()

        for obj in satellites:
            try:
                sat = satellite(obj.norad_cat_id)
            except:
                self.stdout.write(('Satellite {} with Identifier {} does '
                                   'not exist').format(obj.name, obj.norad_cat_id))
                continue
            obj.name = sat.name()
            tle = sat.tle()
            obj.tle0 = tle[0]
            obj.tle1 = tle[1]
            obj.tle2 = tle[2]
            obj.save()
            self.stdout.write(('Satellite {} with Identifier {} '
                               'found [updated]').format(obj.name, obj.norad_cat_id))
9baf9ede15fa988b5da711605b67cc5bbbbc5b36
wanorlan/wanorlan.py
wanorlan/wanorlan.py
import time
import datetime
import subprocess
import json
import sys
import re

PING_RTT_REGEX = re.compile(r'rtt.+=\s*([\d.]+)')


def get_status(ip, timeout):
    t0 = time.time()
    error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
                            stdout=sys.stderr.fileno(),
                            stderr=sys.stderr.fileno())
    delay = time.time() - t0
    return None if error else delay


def log_status(hosts, timeout):
    now = datetime.datetime.now().isoformat(timespec='seconds')
    processes = []
    for host in hosts:
        processes.append(subprocess.Popen(
            ['ping', '-qnc', '1', '-W', str(timeout), host],
            stdout=subprocess.PIPE))
    results = dict(time=now)
    for host, process in zip(hosts, processes):
        if process.wait():
            results[host] = None
        else:
            last_line = list(process.stdout)[-1].strip().decode('utf8')
            results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
    return results


TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']

if __name__ == '__main__':
    t0 = time.time()
    while True:
        time.sleep(max(0, t0 + INTERVAL - time.time()))
        t0 = time.time()
        print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
Add simple script for logging to diagnose WAN vs LAN connection issues
Add simple script for logging to diagnose WAN vs LAN connection issues
Python
mit
DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets,DouglasOrr/Snippets
Add simple script for logging to diagnose WAN vs LAN connection issues
import time
import datetime
import subprocess
import json
import sys
import re

PING_RTT_REGEX = re.compile(r'rtt.+=\s*([\d.]+)')


def get_status(ip, timeout):
    t0 = time.time()
    error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
                            stdout=sys.stderr.fileno(),
                            stderr=sys.stderr.fileno())
    delay = time.time() - t0
    return None if error else delay


def log_status(hosts, timeout):
    now = datetime.datetime.now().isoformat(timespec='seconds')
    processes = []
    for host in hosts:
        processes.append(subprocess.Popen(
            ['ping', '-qnc', '1', '-W', str(timeout), host],
            stdout=subprocess.PIPE))
    results = dict(time=now)
    for host, process in zip(hosts, processes):
        if process.wait():
            results[host] = None
        else:
            last_line = list(process.stdout)[-1].strip().decode('utf8')
            results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
    return results


TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']

if __name__ == '__main__':
    t0 = time.time()
    while True:
        time.sleep(max(0, t0 + INTERVAL - time.time()))
        t0 = time.time()
        print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
<commit_before><commit_msg>Add simple script for logging to diagnose WAN vs LAN connection issues<commit_after>
import time
import datetime
import subprocess
import json
import sys
import re

PING_RTT_REGEX = re.compile(r'rtt.+=\s*([\d.]+)')


def get_status(ip, timeout):
    t0 = time.time()
    error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
                            stdout=sys.stderr.fileno(),
                            stderr=sys.stderr.fileno())
    delay = time.time() - t0
    return None if error else delay


def log_status(hosts, timeout):
    now = datetime.datetime.now().isoformat(timespec='seconds')
    processes = []
    for host in hosts:
        processes.append(subprocess.Popen(
            ['ping', '-qnc', '1', '-W', str(timeout), host],
            stdout=subprocess.PIPE))
    results = dict(time=now)
    for host, process in zip(hosts, processes):
        if process.wait():
            results[host] = None
        else:
            last_line = list(process.stdout)[-1].strip().decode('utf8')
            results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
    return results


TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']

if __name__ == '__main__':
    t0 = time.time()
    while True:
        time.sleep(max(0, t0 + INTERVAL - time.time()))
        t0 = time.time()
        print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
Add simple script for logging to diagnose WAN vs LAN connection issues
import time
import datetime
import subprocess
import json
import sys
import re

PING_RTT_REGEX = re.compile(r'rtt.+=\s*([\d.]+)')


def get_status(ip, timeout):
    t0 = time.time()
    error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
                            stdout=sys.stderr.fileno(),
                            stderr=sys.stderr.fileno())
    delay = time.time() - t0
    return None if error else delay


def log_status(hosts, timeout):
    now = datetime.datetime.now().isoformat(timespec='seconds')
    processes = []
    for host in hosts:
        processes.append(subprocess.Popen(
            ['ping', '-qnc', '1', '-W', str(timeout), host],
            stdout=subprocess.PIPE))
    results = dict(time=now)
    for host, process in zip(hosts, processes):
        if process.wait():
            results[host] = None
        else:
            last_line = list(process.stdout)[-1].strip().decode('utf8')
            results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
    return results


TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']

if __name__ == '__main__':
    t0 = time.time()
    while True:
        time.sleep(max(0, t0 + INTERVAL - time.time()))
        t0 = time.time()
        print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
<commit_before><commit_msg>Add simple script for logging to diagnose WAN vs LAN connection issues<commit_after>import time
import datetime
import subprocess
import json
import sys
import re

PING_RTT_REGEX = re.compile(r'rtt.+=\s*([\d.]+)')


def get_status(ip, timeout):
    t0 = time.time()
    error = subprocess.call(['ping', '-c', '1', '-W', str(timeout), ip],
                            stdout=sys.stderr.fileno(),
                            stderr=sys.stderr.fileno())
    delay = time.time() - t0
    return None if error else delay


def log_status(hosts, timeout):
    now = datetime.datetime.now().isoformat(timespec='seconds')
    processes = []
    for host in hosts:
        processes.append(subprocess.Popen(
            ['ping', '-qnc', '1', '-W', str(timeout), host],
            stdout=subprocess.PIPE))
    results = dict(time=now)
    for host, process in zip(hosts, processes):
        if process.wait():
            results[host] = None
        else:
            last_line = list(process.stdout)[-1].strip().decode('utf8')
            results[host] = float(PING_RTT_REGEX.match(last_line).group(1))
    return results


TIMEOUT = 2
INTERVAL = 15
HOSTS = ['localhost', '192.168.1.1', '1.1.1.1', '8.8.8.8', 'www.google.co.uk']

if __name__ == '__main__':
    t0 = time.time()
    while True:
        time.sleep(max(0, t0 + INTERVAL - time.time()))
        t0 = time.time()
        print(json.dumps(log_status(HOSTS, timeout=TIMEOUT)), flush=True)
28067277adb659465f1762bd1437a45404fe15e6
cobs2/_cobspy.py
cobs2/_cobspy.py
""" Consistent Overhead Byte Stuffing (COBS) This version is for Python 2.x. """ class DecodeError(Exception): pass def encode(in_bytes): """Encode a string using Consistent Overhead Byte Stuffing (COBS). Input is any byte string. Output is also a byte string. Encoding guarantees no zero bytes in the output. The output string will be expanded slightly, by a predictable amount. An empty string is encoded to '\\x01'""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') final_zero = True out_bytes = [] idx = 0 search_start_idx = 0 for in_char in in_bytes: if in_char == '\x00': final_zero = True out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) search_start_idx = idx + 1 else: if idx - search_start_idx == 0xFD: final_zero = False out_bytes.append('\xFF') out_bytes.append(in_bytes[search_start_idx:idx+1]) search_start_idx = idx + 1 idx += 1 if idx != search_start_idx or final_zero: out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) return ''.join(out_bytes) def decode(in_bytes): """Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception may be raised if the encoded data is invalid.""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') out_bytes = [] idx = 0 if len(in_bytes) > 0: while True: length = ord(in_bytes[idx]) if length == 0: raise DecodeError("zero byte found in input") idx += 1 end = idx + length - 1 copy_bytes = in_bytes[idx:end] if '\x00' in copy_bytes: raise DecodeError("zero byte found in input") out_bytes.append(copy_bytes) idx = end if idx > len(in_bytes): raise DecodeError("not enough input bytes for length code") if idx < len(in_bytes): if length < 0xFF: out_bytes.append('\x00') else: break return ''.join(out_bytes)
Add Python 2.x pure Python implementation.
Add Python 2.x pure Python implementation.
Python
mit
cmcqueen/cobs-python,cmcqueen/cobs-python
Add Python 2.x pure Python implementation.
""" Consistent Overhead Byte Stuffing (COBS) This version is for Python 2.x. """ class DecodeError(Exception): pass def encode(in_bytes): """Encode a string using Consistent Overhead Byte Stuffing (COBS). Input is any byte string. Output is also a byte string. Encoding guarantees no zero bytes in the output. The output string will be expanded slightly, by a predictable amount. An empty string is encoded to '\\x01'""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') final_zero = True out_bytes = [] idx = 0 search_start_idx = 0 for in_char in in_bytes: if in_char == '\x00': final_zero = True out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) search_start_idx = idx + 1 else: if idx - search_start_idx == 0xFD: final_zero = False out_bytes.append('\xFF') out_bytes.append(in_bytes[search_start_idx:idx+1]) search_start_idx = idx + 1 idx += 1 if idx != search_start_idx or final_zero: out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) return ''.join(out_bytes) def decode(in_bytes): """Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception may be raised if the encoded data is invalid.""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') out_bytes = [] idx = 0 if len(in_bytes) > 0: while True: length = ord(in_bytes[idx]) if length == 0: raise DecodeError("zero byte found in input") idx += 1 end = idx + length - 1 copy_bytes = in_bytes[idx:end] if '\x00' in copy_bytes: raise DecodeError("zero byte found in input") out_bytes.append(copy_bytes) idx = end if idx > len(in_bytes): raise DecodeError("not enough input bytes for length code") if idx < len(in_bytes): if length < 0xFF: out_bytes.append('\x00') else: break return ''.join(out_bytes)
<commit_before><commit_msg>Add Python 2.x pure Python implementation.<commit_after>
""" Consistent Overhead Byte Stuffing (COBS) This version is for Python 2.x. """ class DecodeError(Exception): pass def encode(in_bytes): """Encode a string using Consistent Overhead Byte Stuffing (COBS). Input is any byte string. Output is also a byte string. Encoding guarantees no zero bytes in the output. The output string will be expanded slightly, by a predictable amount. An empty string is encoded to '\\x01'""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') final_zero = True out_bytes = [] idx = 0 search_start_idx = 0 for in_char in in_bytes: if in_char == '\x00': final_zero = True out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) search_start_idx = idx + 1 else: if idx - search_start_idx == 0xFD: final_zero = False out_bytes.append('\xFF') out_bytes.append(in_bytes[search_start_idx:idx+1]) search_start_idx = idx + 1 idx += 1 if idx != search_start_idx or final_zero: out_bytes.append(chr(idx - search_start_idx + 1)) out_bytes.append(in_bytes[search_start_idx:idx]) return ''.join(out_bytes) def decode(in_bytes): """Decode a string using Consistent Overhead Byte Stuffing (COBS). Input should be a byte string that has been COBS encoded. Output is also a byte string. A cobs.DecodeError exception may be raised if the encoded data is invalid.""" if isinstance(in_bytes, unicode): raise TypeError('Unicode-objects are not supported; string objects only') out_bytes = [] idx = 0 if len(in_bytes) > 0: while True: length = ord(in_bytes[idx]) if length == 0: raise DecodeError("zero byte found in input") idx += 1 end = idx + length - 1 copy_bytes = in_bytes[idx:end] if '\x00' in copy_bytes: raise DecodeError("zero byte found in input") out_bytes.append(copy_bytes) idx = end if idx > len(in_bytes): raise DecodeError("not enough input bytes for length code") if idx < len(in_bytes): if length < 0xFF: out_bytes.append('\x00') else: break return ''.join(out_bytes)
Add Python 2.x pure Python implementation.
"""
Consistent Overhead Byte Stuffing (COBS)

This version is for Python 2.x.
"""


class DecodeError(Exception):
    pass


def encode(in_bytes):
    """Encode a string using Consistent Overhead Byte Stuffing (COBS).

    Input is any byte string. Output is also a byte string.

    Encoding guarantees no zero bytes in the output. The output
    string will be expanded slightly, by a predictable amount.

    An empty string is encoded to '\\x01'"""
    if isinstance(in_bytes, unicode):
        raise TypeError('Unicode-objects are not supported; string objects only')
    final_zero = True
    out_bytes = []
    idx = 0
    search_start_idx = 0
    for in_char in in_bytes:
        if in_char == '\x00':
            final_zero = True
            out_bytes.append(chr(idx - search_start_idx + 1))
            out_bytes.append(in_bytes[search_start_idx:idx])
            search_start_idx = idx + 1
        else:
            if idx - search_start_idx == 0xFD:
                final_zero = False
                out_bytes.append('\xFF')
                out_bytes.append(in_bytes[search_start_idx:idx+1])
                search_start_idx = idx + 1
        idx += 1
    if idx != search_start_idx or final_zero:
        out_bytes.append(chr(idx - search_start_idx + 1))
        out_bytes.append(in_bytes[search_start_idx:idx])
    return ''.join(out_bytes)


def decode(in_bytes):
    """Decode a string using Consistent Overhead Byte Stuffing (COBS).

    Input should be a byte string that has been COBS encoded. Output
    is also a byte string.

    A cobs.DecodeError exception may be raised if the encoded data
    is invalid."""
    if isinstance(in_bytes, unicode):
        raise TypeError('Unicode-objects are not supported; string objects only')
    out_bytes = []
    idx = 0
    if len(in_bytes) > 0:
        while True:
            length = ord(in_bytes[idx])
            if length == 0:
                raise DecodeError("zero byte found in input")
            idx += 1
            end = idx + length - 1
            copy_bytes = in_bytes[idx:end]
            if '\x00' in copy_bytes:
                raise DecodeError("zero byte found in input")
            out_bytes.append(copy_bytes)
            idx = end
            if idx > len(in_bytes):
                raise DecodeError("not enough input bytes for length code")
            if idx < len(in_bytes):
                if length < 0xFF:
                    out_bytes.append('\x00')
            else:
                break
    return ''.join(out_bytes)
<commit_before><commit_msg>Add Python 2.x pure Python implementation.<commit_after>"""
Consistent Overhead Byte Stuffing (COBS)

This version is for Python 2.x.
"""


class DecodeError(Exception):
    pass


def encode(in_bytes):
    """Encode a string using Consistent Overhead Byte Stuffing (COBS).

    Input is any byte string. Output is also a byte string.

    Encoding guarantees no zero bytes in the output. The output
    string will be expanded slightly, by a predictable amount.

    An empty string is encoded to '\\x01'"""
    if isinstance(in_bytes, unicode):
        raise TypeError('Unicode-objects are not supported; string objects only')
    final_zero = True
    out_bytes = []
    idx = 0
    search_start_idx = 0
    for in_char in in_bytes:
        if in_char == '\x00':
            final_zero = True
            out_bytes.append(chr(idx - search_start_idx + 1))
            out_bytes.append(in_bytes[search_start_idx:idx])
            search_start_idx = idx + 1
        else:
            if idx - search_start_idx == 0xFD:
                final_zero = False
                out_bytes.append('\xFF')
                out_bytes.append(in_bytes[search_start_idx:idx+1])
                search_start_idx = idx + 1
        idx += 1
    if idx != search_start_idx or final_zero:
        out_bytes.append(chr(idx - search_start_idx + 1))
        out_bytes.append(in_bytes[search_start_idx:idx])
    return ''.join(out_bytes)


def decode(in_bytes):
    """Decode a string using Consistent Overhead Byte Stuffing (COBS).

    Input should be a byte string that has been COBS encoded. Output
    is also a byte string.

    A cobs.DecodeError exception may be raised if the encoded data
    is invalid."""
    if isinstance(in_bytes, unicode):
        raise TypeError('Unicode-objects are not supported; string objects only')
    out_bytes = []
    idx = 0
    if len(in_bytes) > 0:
        while True:
            length = ord(in_bytes[idx])
            if length == 0:
                raise DecodeError("zero byte found in input")
            idx += 1
            end = idx + length - 1
            copy_bytes = in_bytes[idx:end]
            if '\x00' in copy_bytes:
                raise DecodeError("zero byte found in input")
            out_bytes.append(copy_bytes)
            idx = end
            if idx > len(in_bytes):
                raise DecodeError("not enough input bytes for length code")
            if idx < len(in_bytes):
                if length < 0xFF:
                    out_bytes.append('\x00')
            else:
                break
    return ''.join(out_bytes)