Dataset schema:

| Column          | Type   | Values          |
|-----------------|--------|-----------------|
| commit          | string | lengths 40–40   |
| old_file        | string | lengths 4–118   |
| new_file        | string | lengths 4–118   |
| old_contents    | string | lengths 0–2.94k |
| new_contents    | string | lengths 1–4.43k |
| subject         | string | lengths 15–444  |
| message         | string | lengths 16–3.45k |
| lang            | string | 1 class         |
| license         | string | 13 classes      |
| repos           | string | lengths 5–43.2k |
| prompt          | string | lengths 17–4.58k |
| response        | string | lengths 1–4.43k |
| prompt_tagged   | string | lengths 58–4.62k |
| response_tagged | string | lengths 1–4.43k |
| text            | string | lengths 132–7.29k |
| text_tagged     | string | lengths 173–7.33k |

All rows in this extract are Python files (`lang` has a single class) under one of 13 licenses, and every commit shown below adds a new file, so `old_contents` is empty and `old_file` equals `new_file`. `message` repeats `subject` plus the commit body when there is one. The remaining columns are derived: `prompt` repeats the commit message, `response` repeats the new contents, `text` concatenates the two, and the `*_tagged` variants wrap the same fields in `<commit_before>`, `<commit_msg>`, and `<commit_after>` markers.
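As a minimal sketch of how a dataset with this schema is consumed, the snippet below loads it with the Hugging Face `datasets` library and reads one record. The hub path `user/python-commit-dataset` is a hypothetical placeholder (the dataset's real identifier is not given here), and the split name `train` is an assumption.

```python
from datasets import load_dataset

# "user/python-commit-dataset" is a hypothetical placeholder path; substitute
# the real hub identifier and split name for this dataset.
ds = load_dataset("user/python-commit-dataset", split="train")

row = ds[0]
print(row["commit"])              # 40-character commit hash
print(row["new_file"])            # path of the file the commit touches
print(row["subject"])             # first line of the commit message
print(row["new_contents"][:120])  # file contents after the commit
```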
---

Commit: `4a83a84f5358482fab7d1b7b7e259aed2b958e5d`
File: `maas/client/viscera/tests/test_sshkeys.py` (new file)
Subject: Add tests for ssh keys (first draft)
Language: Python
License: agpl-3.0
Repos: alburnum/alburnum-maas-client, blakerouse/python-libmaas

New contents:

```python
"""Test for `maas.client.viscera.sshkeys`."""

from maas.client.viscera import Origin

from .. import sshkeys

from ...testing import TestCase
from ..testing import bind


def make_origin():
    return bind(sshkeys.SSHKeys, sshkeys.SSHKey)


class TestSSHKeys(TestCase):

    def test__sshkeys_read(self):
        """
        SSHKeys.read() returns all SSH keys.
        """
        SSHKeys = make_origin().SSHKeys
        # create a list of dicts for read() to return
        self.assertThat(SSHKeys.read(), Equals())

    def test__sshkeys_create(self):
        """
        SSHKeys.create() returns a new SSHKey.
        """
        SSHKeys = make_origin().SSHKeys
        SSHKeys._handler.create.return_value = {
            "id": 1,
            "key": "jeqqirevireveriv02329329mcie",
            "keysource": "",
        }
        SSHKeys._handler.create.assert_called_once_with(
            key="jeqqirevireveriv02329329mcie"
        )


class TestSSHKey(TestCase):

    def test__sshkey_read(self):
        """
        SSHKey.read(id) returns a single SSHKey.
        """
        SSHKey = make_origin().SSHKey
        sshkey = SSHKey()

    def test__sshkey_update(self):
        """
        SSHKey.update(id, ...) returns an updated SSHKey.
        """
```
---

Commit: `a5bf6c1a329f5d1a85222f9abf6635b07722a8b1`
File: `tests/grammar_creation_test/__init__.py` (new file)
Subject: Add directory for grammar creation tests
Language: Python
License: mit
Repos: PatrikValkovic/grammpy

New contents:

```python
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
```
---

Commit: `79245b929d7084e262d0e1d19db1a368d51ca2fc`
File: `datasets/management/commands/clear_computing_keys.py` (new file)
Subject: Add managment command clear redis computing keys
Language: Python
License: agpl-3.0
Repos: MTG/freesound-datasets (×4)

New contents:

```python
from django.core.management.base import BaseCommand
from utils.redis_store import store


class Command(BaseCommand):
    help = 'Remove all computing keys stored in Redis Store. Use it as python manage.py clear_store'

    def add_arguments(self, parser):
        pass

    def handle(self, *args, **options):
        count = store.delete_compute_keys()
        print('Deleted {0} keys'.format(count))
```
---

Commit: `3f2492d522134efd630c3388f4531efd7274dec5`
File: `examples/test_request_sb_fixture.py` (new file)
Subject: Add an example that pulls in the sb fixture during a test
Language: Python
License: mit
Repos: seleniumbase/SeleniumBase (×4), mdmintz/SeleniumBase (×4)

New contents:

```python
def test_request_sb_fixture(request):
    sb = request.getfixturevalue('sb')
    sb.open("https://seleniumbase.io/demo_page")
    sb.assert_text("SeleniumBase", "#myForm h2")
    sb.assert_element("input#myTextInput")
    sb.type("#myTextarea", "This is me")
    sb.click("#myButton")
    sb.tearDown()
```
---

Commit: `89d7e956e5194a1533c25b609928c77707f78785`
File: `tests/test_importable.py` (new file)
Subject: Add basic import smoke test
Language: Python
License: mit
Repos: PyCQA/isort (×2)

New contents:

```python
"""Basic set of tests to ensure entire code base is importable"""
import pytest


def test_importable():
    """Simple smoketest to ensure all isort modules are importable"""
    import isort
    import isort._future
    import isort._future._dataclasses
    import isort._version
    import isort.api
    import isort.comments
    import isort.compat
    import isort.exceptions
    import isort.finders
    import isort.format
    import isort.hooks
    import isort.isort
    import isort.logo
    import isort.main
    import isort.output
    import isort.parse
    import isort.profiles
    import isort.pylama_isort
    import isort.sections
    import isort.settings
    import isort.setuptools_commands
    import isort.sorting
    import isort.stdlibs
    import isort.stdlibs.all
    import isort.stdlibs.py2
    import isort.stdlibs.py3
    import isort.stdlibs.py27
    import isort.stdlibs.py35
    import isort.stdlibs.py36
    import isort.stdlibs.py37
    import isort.utils
    import isort.wrap
    import isort.wrap_modes

    with pytest.raises(SystemExit):
        import isort.__main__
```
---

Commit: `7c12c6c8f0be5c611c5463277a574489a4b9a338`
File: `migrations/versions/0325_int_letter_rates_fix.py` (new file)
Subject: Fix international letter rate start dates
Message body: These were not taking timezones into account before.
Language: Python
License: mit
Repos: alphagov/notifications-api (×2)

New contents:

```python
"""

Revision ID: 0325_int_letter_rates_fix
Revises: 0324_int_letter_rates
Create Date: 2020-07-15 10:09:17.218183

"""
from datetime import datetime

from alembic import op
from sqlalchemy.sql import text

revision = '0325_int_letter_rates_fix'
down_revision = '0324_int_letter_rates'

old_start_date = datetime(2020, 7, 1, 0, 0)
new_start_date = datetime(2020, 6, 30, 23, 0)


def upgrade():
    conn = op.get_bind()
    conn.execute(text(
        """UPDATE letter_rates
           SET start_date = :new_start_date
           WHERE start_date = :old_start_date"""
    ), new_start_date=new_start_date, old_start_date=old_start_date)


def downgrade():
    conn = op.get_bind()
    conn.execute(text(
        """UPDATE letter_rates
           SET start_date = :old_start_date
           WHERE start_date = :new_start_date"""
    ), old_start_date=old_start_date, new_start_date=new_start_date)
```
---

Commit: `2f32dad550d8268b1325747dc1705fa0cc293996`
File: `usr/examples/19-Low-Power/sensor_sleep.py` (new file)
Subject: Add sensor sleep mode example.
Language: Python
License: mit
Repos: kwagyeman/openmv (×4), openmv/openmv (×4), iabdalkader/openmv (×4)

New contents:

```python
# Sensor Sleep Mode Example.
#
# This example demonstrates the sensor sleep mode. The sleep mode saves around
# 40mA when enabled and it's automatically cleared when calling sensor reset().

import sensor, image, time

sensor.reset()                      # Reset and initialize the sensor.
sensor.set_pixformat(sensor.RGB565) # Set pixel format to RGB565 (or GRAYSCALE)
sensor.set_framesize(sensor.QVGA)   # Set frame size to QVGA (320x240)
sensor.skip_frames(time = 3000)     # Capture frames for 3000ms.
sensor.sleep(True)                  # Enable sensor sleep mode (saves about 40mA).
```
---

Commit: `3e82ad485bda3f767e2ab099c5be16ab72e48d14`
File: `adaptive/csv_to_npz.py` (new file)
Subject: Add program to convert csv training data to npz training data.
Language: Python
License: apache-2.0
Repos: session-id/poker-predictor

New contents:

```python
import os
import sys
import json

import numpy as np

if __name__ == '__main__':
    if len(sys.argv) == 2:
        input_files = []
        while True:
            try:
                line = raw_input()
                input_files.append(line)
            except EOFError:
                break
    else:
        input_files = sys.argv[1:-1]
    output_dir = sys.argv[-1]

    for filename in input_files:
        print filename
        basename = os.path.basename(filename)[:-4]
        out_file = os.path.join(output_dir, basename + '.npz')
        with open(filename, 'r') as f:
            inputs, boards, outputs = [], [], []
            for line in f.readlines():
                line = "[" + line.strip() + "]"
                inp, board, out = json.loads(line)
                inputs.append(inp)
                boards.append(board)
                outputs.append(out)
            input_arr = np.asarray(inputs)
            board_arr = np.asarray(boards)
            output_arr = np.asarray(outputs)
            np.savez_compressed(out_file, input=input_arr,
                                output=output_arr, board=board_arr)
```
---

Commit: `c670a4f175f39018960fc2d96a226e15ad7a6edd`
File: `tests/basic_wick_test.py` (new file)
Subject: Add test for basic Wick expansion facility
Message body: The test in this module works on the terms directly without Spark parallelization. It is intended for the ease of debugging.
Language: Python
License: mit
Repos: tschijnmo/drudge (×3)

New contents:

```python
"""Test basic Wick expansion of terms.

The tests in the module is attempted to test the core Wick facility on terms,
without parallelization by Spark.
"""

from sympy import symbols, IndexedBase

from drudge import Range, Vec, CR, AN, FERMI, FockDrudge
from drudge.term import sum_term
from drudge.wick import wick_expand


def test_wick_expansion_of_term(spark_ctx):
    """Test the basic Wick expansion facility on a single term."""

    dr = FockDrudge(spark_ctx, exch=FERMI)

    op_base = Vec('f')
    f = op_base[AN]
    f_dag = op_base[CR]

    a, b, c, d = symbols('a b c d')
    r = Range('L')

    t = IndexedBase('t')
    u = IndexedBase('u')

    term = sum_term(
        (a, r), (b, r), (c, r), (d, r),
        t[a, b] * u[c, d] * f_dag[a] * f[b] * f_dag[c] * f[d]
    )[0]

    res = wick_expand(
        term, comparator=dr.comparator, contractor=dr.contractor,
        phase=dr.phase
    )
    assert len(res) == 2

    # Simplify the result a little.
    dumms = {r: [a, b, c, d]}
    res = {
        i.simplify_deltas([lambda x: r])
            .canon(vec_colour=dr.vec_colour)
            .reset_dumms(dumms)[0]
        for i in res
    }

    expected = {
        sum_term(
            (a, r), (b, r), (c, r), (d, r),
            t[a, c] * u[b, d] * f_dag[a] * f_dag[b] * f[d] * f[c]
        )[0],
        sum_term(
            (a, r), (b, r), (c, r),
            t[a, c] * u[c, b] * f_dag[a] * f[b]
        )[0]
    }

    assert (res == expected)
```
---

Commit: `29f8afd5456ffb9c30ee2ddf1567e03ec4e1efed`
File: `tests/test_fastpbkdf2.py` (new file)
Subject: Add test for unsupported algorithms.
Language: Python
License: apache-2.0
Repos: Ayrx/python-fastpbkdf2 (×2)

New contents:

```python
import pytest

from fastpbkdf2 import pbkdf2_hmac


def test_unsupported_algorithm():
    with pytest.raises(ValueError):
        pbkdf2_hmac("foo", b"password", b"salt", 1)
```
---

Commit: `466f98425741ec2630af2d5a00f79256c627c608`
File: `tests/test_pkg_source.py` (new file)
Subject: Add concept of pkg_source tests
Language: Python
License: mit
Repos: mcyprian/sclbuilder

New contents:

```python
import pytest
from flexmock import flexmock
import sys
import os
import shutil

import sclbuilder.utils
from sclbuilder.pkg_source_plugins.dnf import DnfArchive

tests_dir = os.path.split(os.path.abspath(__file__))[0]


class TestPkgSource(object):
    @pytest.mark.parametrize(('input_path', 'expected'), [
        (tests_dir + '/test', tests_dir + '/test/'),
        (tests_dir + '/test/', tests_dir + '/test/')
    ])
    def test_pkg_dir(self, input_path, expected):
        flexmock(DnfArchive).should_receive('download').once()
        flexmock(DnfArchive).should_receive('pack').once()
        flexmock(DnfArchive).should_receive('unpack').once()
        flexmock(DnfArchive, rpms_from_spec=['pkg1', 'pkg2'])
        pkg_source = DnfArchive('pkg', input_path)
        assert pkg_source.pkg_dir == expected
        shutil.rmtree(tests_dir + '/test/')

#    @pytest.mark.parametrize(('srpms', 'expected'), [
#        ('''
#         python-flask-0.10.1-7.fc23.noarch
#         python-flask-doc-0.10.1-7.fc23.noarch
#         python3-flask-0.10.1-7.fc23.noarch
#         python3-flask-doc-0.10.1-7.fc23.noarch
#         ''', {'python-flask', 'python3-flask'})
#    ])
#    def test_rpms_from_spec(self, srpms, expected):
#        flexmock(DnfArchive).should_receive('download').once()
#        flexmock(DnfArchive).should_receive('pack').once()
#        flexmock(DnfArchive).should_receive('unpack').once()
#        flexmock(sys.modules['sclbuilder.utils']).should_receive('subprocess_popen_call').and_return({'stdout' : srpms})
#        pkg_source = DnfArchive('pkg', 'dir')
#        assert pkg_source.rpms_from_spec == expected
```
---

Commit: `6122dd94ca881454319937884ced2efd7b0a6859`
File: `scripts/quantize.py` (new file)
Subject: Add tool to reduce number of colors in an image.
Language: Python
License: artistic-2.0
Repos: cahirwpz/demoscene (×4)

New contents:

```python
#!/usr/bin/env python

import Image
import argparse
import os
import sys


def main():
    parser = argparse.ArgumentParser(
        description='Converts input image to use only given number of colors.')
    parser.add_argument('-c', '--colors', metavar='COLORS', type=int,
                        default=256,
                        help='Number of colors to use in output image.')
    parser.add_argument('-d', '--dithering', action='store_true',
                        help='Turn on Floyd-Steinberg dithering.')
    parser.add_argument('-f', '--force', action='store_true',
                        help='If output image exists, the tool will overwrite it.')
    parser.add_argument('input', metavar='INPUT', type=str,
                        help='Input image filename.')
    parser.add_argument('output', metavar='OUTPUT', type=str,
                        help='Output image filename.')
    args = parser.parse_args()

    inputPath = os.path.abspath(args.input)
    outputPath = os.path.abspath(args.output)

    if not os.path.isfile(inputPath):
        raise SystemExit('Input file does not exists!')
    if os.path.isfile(outputPath) and not args.force:
        raise SystemExit('Will not overwrite output file!')
    if inputPath == outputPath:
        raise SystemExit('Input and output files have to be different!')

    try:
        image = Image.open(inputPath)
    except IOError as ex:
        raise SystemExit('Error: %s.' % ex)
    else:
        image = image.convert('RGB')
        output = image.convert('P', palette=Image.ADAPTIVE, colors=args.colors)
        if args.dithering:
            output = image.quantize(palette=output)
        output.save(outputPath)


if __name__ == '__main__':
    main()
```
---

Commit: `b56b7ed23ce60a352f163d21fedff63fe2a1c44a`
File: `scheduler/scheduled_external_program.py` (new file)
Subject: Implement a scheduled version of Luigi's external program module
Language: Python
License: unlicense
Repos: ppavlidis/rnaseq-pipeline (×3)

New contents:

```python
import luigi
from luigi.contrib.external_program import ExternalProgramTask, ExternalProgramRunError
from subprocess import Popen, PIPE, check_call
import os
import datetime
import logging

logger = logging.getLogger('luigi-interface')


class Scheduler(object):
    @classmethod
    def fromblurb(cls, blurb):
        for subcls in cls.__subclasses__():
            if subcls.blurb == blurb:
                return subcls()
        else:
            raise ValueError('{} is not a reckognized scheduler.'.format(blurb))

    @classmethod
    def run(self, task):
        raise NotImplemented


class LocalScheduler(Scheduler):
    blurb = 'local'

    @classmethod
    def run(self, task):
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(args, env=env, stdout=PIPE, stderr=PIPE,
                     universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.',
                                          args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))


class SlurmScheduler(Scheduler):
    blurb = 'slurm'

    @classmethod
    def run(self, task):
        srun_args = [
            '--time', '{}:{}:{}'.format(task.walltime.seconds % 3600,
                                        (task.walltime.seconds // 3600) % 60,
                                        ((task.walltime.seconds // 3600) // 60)),
            '--mem', '{}M'.format(task.memory),
            '--cpus-per-task', str(task.ncpus)]
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(['srun'] + srun_args + args, env=env, stdout=PIPE,
                     stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.',
                                          args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))


class ScheduledExternalProgramTask(ExternalProgramTask):
    """
    Variant of luigi.contrib.external_program.ExternalProgramTask that runs on
    a job scheduler.
    """
    walltime = luigi.TimeDeltaParameter(default=datetime.timedelta(hours=1))
    ncpus = luigi.IntParameter(default=1)
    memory = luigi.FloatParameter(default=1024)
    scheduler = luigi.ChoiceParameter(
        choices=[cls.blurb for cls in Scheduler.__subclasses__()],
        default='local')

    def run(self):
        return Scheduler.fromblurb(self.scheduler).run(self)
```
Implement a scheduled version of Luigi's external program module
import luigi from luigi.contrib.external_program import ExternalProgramTask, ExternalProgramRunError from subprocess import Popen, PIPE, check_call import os import datetime import logging logger = logging.getLogger('luigi-interface') class Scheduler(object): @classmethod def fromblurb(cls, blurb): for subcls in cls.__subclasses__(): if subcls.blurb == blurb: return subcls() else: raise ValueError('{} is not a reckognized scheduler.'.format(blurb)) @classmethod def run(self, task): raise NotImplemented class LocalScheduler(Scheduler): blurb = 'local' @classmethod def run(self, task): args = list(map(str, task.program_args())) env = task.program_environment() logger.info('Running command {}'.format(' '.join(args))) proc = Popen(args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True) stdout, stderr = proc.communicate() if proc.returncode != 0: raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr) if task.capture_output: logger.info('Program stdout:\n{}'.format(stdout)) logger.info('Program stderr:\n{}'.format(stderr)) class SlurmScheduler(Scheduler): blurb = 'slurm' @classmethod def run(self, task): srun_args = [ '--time', '{}:{}:{}'.format(task.walltime.seconds % 3600, (task.walltime.seconds // 3600) % 60, ((task.walltime.seconds // 3600) // 60)), '--mem', '{}M'.format(task.memory), '--cpus-per-task', str(task.ncpus)] args = list(map(str, task.program_args())) env = task.program_environment() logger.info('Running command {}'.format(' '.join(args))) proc = Popen(['srun'] + srun_args + args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True) stdout, stderr = proc.communicate() if proc.returncode != 0: raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr) if task.capture_output: logger.info('Program stdout:\n{}'.format(stdout)) logger.info('Program stderr:\n{}'.format(stderr)) class ScheduledExternalProgramTask(ExternalProgramTask): """ Variant of luigi.contrib.external_program.ExternalProgramTask that runs on a job scheduler. """ walltime = luigi.TimeDeltaParameter(default=datetime.timedelta(hours=1)) ncpus = luigi.IntParameter(default=1) memory = luigi.FloatParameter(default=1024) scheduler = luigi.ChoiceParameter(choices=[cls.blurb for cls in Scheduler.__subclasses__()], default='local') def run(self): return Scheduler.fromblurb(self.scheduler).run(self)
<commit_before><commit_msg>Implement a scheduled version of Luigi's external program module<commit_after>
import luigi
from luigi.contrib.external_program import ExternalProgramTask, ExternalProgramRunError

from subprocess import Popen, PIPE, check_call

import os
import datetime
import logging

logger = logging.getLogger('luigi-interface')

class Scheduler(object):
    @classmethod
    def fromblurb(cls, blurb):
        for subcls in cls.__subclasses__():
            if subcls.blurb == blurb:
                return subcls()
        else:
            raise ValueError('{} is not a recognized scheduler.'.format(blurb))

    @classmethod
    def run(self, task):
        raise NotImplementedError

class LocalScheduler(Scheduler):
    blurb = 'local'
    @classmethod
    def run(self, task):
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class SlurmScheduler(Scheduler):
    blurb = 'slurm'
    @classmethod
    def run(self, task):
        # srun expects the walltime as HH:MM:SS.
        total_seconds = int(task.walltime.total_seconds())
        srun_args = [
            '--time', '{}:{:02d}:{:02d}'.format(total_seconds // 3600,
                                                (total_seconds % 3600) // 60,
                                                total_seconds % 60),
            '--mem', '{}M'.format(task.memory),
            '--cpus-per-task', str(task.ncpus)]
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(['srun'] + srun_args + args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class ScheduledExternalProgramTask(ExternalProgramTask):
    """
    Variant of luigi.contrib.external_program.ExternalProgramTask that runs on
    a job scheduler.
    """
    walltime = luigi.TimeDeltaParameter(default=datetime.timedelta(hours=1))
    ncpus = luigi.IntParameter(default=1)
    memory = luigi.FloatParameter(default=1024)
    scheduler = luigi.ChoiceParameter(choices=[cls.blurb for cls in Scheduler.__subclasses__()], default='local')

    def run(self):
        return Scheduler.fromblurb(self.scheduler).run(self)
Implement a scheduled version of Luigi's external program moduleimport luigi
from luigi.contrib.external_program import ExternalProgramTask, ExternalProgramRunError

from subprocess import Popen, PIPE, check_call

import os
import datetime
import logging

logger = logging.getLogger('luigi-interface')

class Scheduler(object):
    @classmethod
    def fromblurb(cls, blurb):
        for subcls in cls.__subclasses__():
            if subcls.blurb == blurb:
                return subcls()
        else:
            raise ValueError('{} is not a recognized scheduler.'.format(blurb))

    @classmethod
    def run(self, task):
        raise NotImplementedError

class LocalScheduler(Scheduler):
    blurb = 'local'
    @classmethod
    def run(self, task):
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class SlurmScheduler(Scheduler):
    blurb = 'slurm'
    @classmethod
    def run(self, task):
        # srun expects the walltime as HH:MM:SS.
        total_seconds = int(task.walltime.total_seconds())
        srun_args = [
            '--time', '{}:{:02d}:{:02d}'.format(total_seconds // 3600,
                                                (total_seconds % 3600) // 60,
                                                total_seconds % 60),
            '--mem', '{}M'.format(task.memory),
            '--cpus-per-task', str(task.ncpus)]
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(['srun'] + srun_args + args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class ScheduledExternalProgramTask(ExternalProgramTask):
    """
    Variant of luigi.contrib.external_program.ExternalProgramTask that runs on
    a job scheduler.
    """
    walltime = luigi.TimeDeltaParameter(default=datetime.timedelta(hours=1))
    ncpus = luigi.IntParameter(default=1)
    memory = luigi.FloatParameter(default=1024)
    scheduler = luigi.ChoiceParameter(choices=[cls.blurb for cls in Scheduler.__subclasses__()], default='local')

    def run(self):
        return Scheduler.fromblurb(self.scheduler).run(self)
<commit_before><commit_msg>Implement a scheduled version of Luigi's external program module<commit_after>import luigi
from luigi.contrib.external_program import ExternalProgramTask, ExternalProgramRunError

from subprocess import Popen, PIPE, check_call

import os
import datetime
import logging

logger = logging.getLogger('luigi-interface')

class Scheduler(object):
    @classmethod
    def fromblurb(cls, blurb):
        for subcls in cls.__subclasses__():
            if subcls.blurb == blurb:
                return subcls()
        else:
            raise ValueError('{} is not a recognized scheduler.'.format(blurb))

    @classmethod
    def run(self, task):
        raise NotImplementedError

class LocalScheduler(Scheduler):
    blurb = 'local'
    @classmethod
    def run(self, task):
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class SlurmScheduler(Scheduler):
    blurb = 'slurm'
    @classmethod
    def run(self, task):
        # srun expects the walltime as HH:MM:SS.
        total_seconds = int(task.walltime.total_seconds())
        srun_args = [
            '--time', '{}:{:02d}:{:02d}'.format(total_seconds // 3600,
                                                (total_seconds % 3600) // 60,
                                                total_seconds % 60),
            '--mem', '{}M'.format(task.memory),
            '--cpus-per-task', str(task.ncpus)]
        args = list(map(str, task.program_args()))
        env = task.program_environment()
        logger.info('Running command {}'.format(' '.join(args)))
        proc = Popen(['srun'] + srun_args + args, env=env, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = proc.communicate()
        if proc.returncode != 0:
            raise ExternalProgramRunError('Program exited with non-zero return code.', args, env, stdout, stderr)
        if task.capture_output:
            logger.info('Program stdout:\n{}'.format(stdout))
            logger.info('Program stderr:\n{}'.format(stderr))

class ScheduledExternalProgramTask(ExternalProgramTask):
    """
    Variant of luigi.contrib.external_program.ExternalProgramTask that runs on
    a job scheduler.
    """
    walltime = luigi.TimeDeltaParameter(default=datetime.timedelta(hours=1))
    ncpus = luigi.IntParameter(default=1)
    memory = luigi.FloatParameter(default=1024)
    scheduler = luigi.ChoiceParameter(choices=[cls.blurb for cls in Scheduler.__subclasses__()], default='local')

    def run(self):
        return Scheduler.fromblurb(self.scheduler).run(self)
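A minimal usage sketch for the ScheduledExternalProgramTask above. The import path, task name, and command line are illustrative assumptions, not part of the record:

import datetime

import luigi

# Hypothetical module name; adjust to wherever the code above lives.
from scheduled_external_program import ScheduledExternalProgramTask


class GzipFile(ScheduledExternalProgramTask):
    path = luigi.Parameter()

    def program_args(self):
        # Command line consumed by the chosen Scheduler's run().
        return ['gzip', '-k', self.path]

    def output(self):
        return luigi.LocalTarget(self.path + '.gz')


if __name__ == '__main__':
    # Submit to Slurm with 2 CPUs and a 30-minute walltime.
    luigi.build([GzipFile(path='data.csv', scheduler='slurm', ncpus=2,
                          walltime=datetime.timedelta(minutes=30))],
                local_scheduler=True)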
085c5a28c94ba45680848886e9a77b99639bd930
NEO_flyby.py
NEO_flyby.py
import time
import datetime
import requests
import json


def get_NEO_flyby():
    neo_data = []
    unix = time.time()
    datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    json_data_url = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD")
    json_data = json.loads(json_data_url.text)
    for i in range(len(json_data["data"])):
        neo_date = json_data["data"][i][3][:11]
        neo_time = json_data["data"][i][3][11:]
        if neo_date == datestamp:
            neo_data.append((json_data["data"][i][0],))
            # better to sort by magnitude and keep only the largest object, otherwise the tweet gets too long

get_NEO_flyby()

# TODO: Add api indicator of numbers
# TODO: Iterate over data and return tuple
Update 0.1.0 - Get neo flyby data - Check for valid entries for today
Update 0.1.0 - Get neo flyby data - Check for valid entries for today
Python
mit
FXelix/space_facts_bot
Update 0.1.0 - Get neo flyby data - Check for valid entries for today
import time
import datetime
import requests
import json


def get_NEO_flyby():
    neo_data = []
    unix = time.time()
    datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    json_data_url = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD")
    json_data = json.loads(json_data_url.text)
    for i in range(len(json_data["data"])):
        neo_date = json_data["data"][i][3][:11]
        neo_time = json_data["data"][i][3][11:]
        if neo_date == datestamp:
            neo_data.append((json_data["data"][i][0],))
            # better to sort by magnitude and keep only the largest object, otherwise the tweet gets too long

get_NEO_flyby()

# TODO: Add api indicator of numbers
# TODO: Iterate over data and return tuple
<commit_before><commit_msg>Update 0.1.0 - Get neo flyby data - Check for valid entries for today<commit_after>
import time
import datetime
import requests
import json


def get_NEO_flyby():
    neo_data = []
    unix = time.time()
    datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    json_data_url = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD")
    json_data = json.loads(json_data_url.text)
    for i in range(len(json_data["data"])):
        neo_date = json_data["data"][i][3][:11]
        neo_time = json_data["data"][i][3][11:]
        if neo_date == datestamp:
            neo_data.append((json_data["data"][i][0],))
            # better to sort by magnitude and keep only the largest object, otherwise the tweet gets too long

get_NEO_flyby()

# TODO: Add api indicator of numbers
# TODO: Iterate over data and return tuple
Update 0.1.0 - Get neo flyby data - Check for valid entries for today
import time
import datetime
import requests
import json


def get_NEO_flyby():
    neo_data = []
    unix = time.time()
    datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    json_data_url = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD")
    json_data = json.loads(json_data_url.text)
    for i in range(len(json_data["data"])):
        neo_date = json_data["data"][i][3][:11]
        neo_time = json_data["data"][i][3][11:]
        if neo_date == datestamp:
            neo_data.append((json_data["data"][i][0],))
            # better to sort by magnitude and keep only the largest object, otherwise the tweet gets too long

get_NEO_flyby()

# TODO: Add api indicator of numbers
# TODO: Iterate over data and return tuple
<commit_before><commit_msg>Update 0.1.0 - Get neo flyby data - Check for valid entries for today<commit_after>
import time
import datetime
import requests
import json


def get_NEO_flyby():
    neo_data = []
    unix = time.time()
    datestamp = datetime.datetime.fromtimestamp(unix).strftime("%Y-%b-%d")
    json_data_url = requests.get("https://ssd-api.jpl.nasa.gov/cad.api?body=Earth&dist-max=20LD")
    json_data = json.loads(json_data_url.text)
    for i in range(len(json_data["data"])):
        neo_date = json_data["data"][i][3][:11]
        neo_time = json_data["data"][i][3][11:]
        if neo_date == datestamp:
            neo_data.append((json_data["data"][i][0],))
            # better to sort by magnitude and keep only the largest object, otherwise the tweet gets too long

get_NEO_flyby()

# TODO: Add api indicator of numbers
# TODO: Iterate over data and return tuple
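One way to act on the comment in the code above (sort by magnitude, keep only the largest object so the tweet stays short). This sketch assumes the CAD API's default field order, in which absolute magnitude h is the last element of each data row; a lower h means a larger body:

def largest_neo(rows):
    # Keep rows that report an absolute magnitude, then take the minimum h.
    with_h = [row for row in rows if row[-1] not in (None, "")]
    return min(with_h, key=lambda row: float(row[-1])) if with_h else None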
ce8c81cdf4d9a4d7f6704a80f8218ecfdf9a03fe
OIPA/iati_synchroniser/migrations/0024_datasetfailedpickup.py
OIPA/iati_synchroniser/migrations/0024_datasetfailedpickup.py
# Generated by Django 2.0.13 on 2021-01-22 10:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('iati_synchroniser', '0023_auto_20210110_0059'), ] operations = [ migrations.CreateModel( name='DatasetFailedPickup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_http_error', models.BooleanField(default=False)), ('status_code', models.CharField(max_length=100, null=True)), ('error_detail', models.TextField(max_length=1000, null=True)), ('timestamp', models.DateTimeField(auto_now=True)), ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_synchroniser.Dataset')), ], ), ]
Add migration for latest version
Add migration for latest version
Python
agpl-3.0
openaid-IATI/OIPA,openaid-IATI/OIPA,zimmerman-zimmerman/OIPA,zimmerman-zimmerman/OIPA,zimmerman-zimmerman/OIPA,openaid-IATI/OIPA,openaid-IATI/OIPA,zimmerman-zimmerman/OIPA,zimmerman-zimmerman/OIPA,openaid-IATI/OIPA
Add migration for latest version
# Generated by Django 2.0.13 on 2021-01-22 10:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('iati_synchroniser', '0023_auto_20210110_0059'), ] operations = [ migrations.CreateModel( name='DatasetFailedPickup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_http_error', models.BooleanField(default=False)), ('status_code', models.CharField(max_length=100, null=True)), ('error_detail', models.TextField(max_length=1000, null=True)), ('timestamp', models.DateTimeField(auto_now=True)), ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_synchroniser.Dataset')), ], ), ]
<commit_before><commit_msg>Add migration for latest version<commit_after>
# Generated by Django 2.0.13 on 2021-01-22 10:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('iati_synchroniser', '0023_auto_20210110_0059'), ] operations = [ migrations.CreateModel( name='DatasetFailedPickup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_http_error', models.BooleanField(default=False)), ('status_code', models.CharField(max_length=100, null=True)), ('error_detail', models.TextField(max_length=1000, null=True)), ('timestamp', models.DateTimeField(auto_now=True)), ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_synchroniser.Dataset')), ], ), ]
Add migration for latest version# Generated by Django 2.0.13 on 2021-01-22 10:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('iati_synchroniser', '0023_auto_20210110_0059'), ] operations = [ migrations.CreateModel( name='DatasetFailedPickup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_http_error', models.BooleanField(default=False)), ('status_code', models.CharField(max_length=100, null=True)), ('error_detail', models.TextField(max_length=1000, null=True)), ('timestamp', models.DateTimeField(auto_now=True)), ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_synchroniser.Dataset')), ], ), ]
<commit_before><commit_msg>Add migration for latest version<commit_after># Generated by Django 2.0.13 on 2021-01-22 10:56 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('iati_synchroniser', '0023_auto_20210110_0059'), ] operations = [ migrations.CreateModel( name='DatasetFailedPickup', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('is_http_error', models.BooleanField(default=False)), ('status_code', models.CharField(max_length=100, null=True)), ('error_detail', models.TextField(max_length=1000, null=True)), ('timestamp', models.DateTimeField(auto_now=True)), ('dataset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='iati_synchroniser.Dataset')), ], ), ]
3c2ee5a5b884edcd8dd270ac78efe60b1d453daa
profile_collection/acceptance_tests/06-scan-slits.py
profile_collection/acceptance_tests/06-scan-slits.py
from bluesky.plans import relative_inner_product_scan from bluesky.callbacks import LiveTable assert slt_mb2.connected RE(relative_inner_product_scan([em], 5, slt_mb2.o, 0, 1, slt_mb2.i, 0, 1), LiveTable([slt_mb2.o, slt_mb2.i, em]))
Add scan slits test with inner_product_scan.
Add scan slits test with inner_product_scan.
Python
bsd-2-clause
NSLS-II-XPD/ipython_ophyd,NSLS-II-XPD/ipython_ophyd
Add scan slits test with inner_product_scan.
from bluesky.plans import relative_inner_product_scan from bluesky.callbacks import LiveTable assert slt_mb2.connected RE(relative_inner_product_scan([em], 5, slt_mb2.o, 0, 1, slt_mb2.i, 0, 1), LiveTable([slt_mb2.o, slt_mb2.i, em]))
<commit_before><commit_msg>Add scan slits test with inner_product_scan.<commit_after>
from bluesky.plans import relative_inner_product_scan from bluesky.callbacks import LiveTable assert slt_mb2.connected RE(relative_inner_product_scan([em], 5, slt_mb2.o, 0, 1, slt_mb2.i, 0, 1), LiveTable([slt_mb2.o, slt_mb2.i, em]))
Add scan slits test with inner_product_scan.from bluesky.plans import relative_inner_product_scan from bluesky.callbacks import LiveTable assert slt_mb2.connected RE(relative_inner_product_scan([em], 5, slt_mb2.o, 0, 1, slt_mb2.i, 0, 1), LiveTable([slt_mb2.o, slt_mb2.i, em]))
<commit_before><commit_msg>Add scan slits test with inner_product_scan.<commit_after>from bluesky.plans import relative_inner_product_scan from bluesky.callbacks import LiveTable assert slt_mb2.connected RE(relative_inner_product_scan([em], 5, slt_mb2.o, 0, 1, slt_mb2.i, 0, 1), LiveTable([slt_mb2.o, slt_mb2.i, em]))
5ecbb43f1b6b7110b7d312b1dcbdd7a2baff629d
src/collectors/consul/ConsulCollector.py
src/collectors/consul/ConsulCollector.py
import diamond.collector class ConsulCollector(diamond.collector.Collector): def get_default_config_help(self): return super(ConsulCollector, self).get_default_config_help() def get_default_config(self): """ Returns the default collector settings """ config = super(ConsulCollector, self).get_default_config() config.update({ 'url': 'http://localhost:8500', 'path': 'consul' }) return config def collect(self): self.log.info('Collecting for Consul')
Add an empty Consul collector
Add an empty Consul collector
Python
mit
Netuitive/netuitive-diamond,Netuitive/netuitive-diamond,Netuitive/netuitive-diamond,Netuitive/netuitive-diamond
Add an empty Consul collector
import diamond.collector class ConsulCollector(diamond.collector.Collector): def get_default_config_help(self): return super(ConsulCollector, self).get_default_config_help() def get_default_config(self): """ Returns the default collector settings """ config = super(ConsulCollector, self).get_default_config() config.update({ 'url': 'http://localhost:8500', 'path': 'consul' }) return config def collect(self): self.log.info('Collecting for Consul')
<commit_before><commit_msg>Add an empty Consul collector<commit_after>
import diamond.collector class ConsulCollector(diamond.collector.Collector): def get_default_config_help(self): return super(ConsulCollector, self).get_default_config_help() def get_default_config(self): """ Returns the default collector settings """ config = super(ConsulCollector, self).get_default_config() config.update({ 'url': 'http://localhost:8500', 'path': 'consul' }) return config def collect(self): self.log.info('Collecting for Consul')
Add an empty Consul collectorimport diamond.collector class ConsulCollector(diamond.collector.Collector): def get_default_config_help(self): return super(ConsulCollector, self).get_default_config_help() def get_default_config(self): """ Returns the default collector settings """ config = super(ConsulCollector, self).get_default_config() config.update({ 'url': 'http://localhost:8500', 'path': 'consul' }) return config def collect(self): self.log.info('Collecting for Consul')
<commit_before><commit_msg>Add an empty Consul collector<commit_after>import diamond.collector class ConsulCollector(diamond.collector.Collector): def get_default_config_help(self): return super(ConsulCollector, self).get_default_config_help() def get_default_config(self): """ Returns the default collector settings """ config = super(ConsulCollector, self).get_default_config() config.update({ 'url': 'http://localhost:8500', 'path': 'consul' }) return config def collect(self): self.log.info('Collecting for Consul')
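The collector above is deliberately empty. As a sketch of where collect() could go, the body below polls Consul's /v1/status/leader endpoint and reports a single gauge through Diamond's standard publish() helper; the metric name and timeout are illustrative:

import json
import urllib2

import diamond.collector


class ConsulCollector(diamond.collector.Collector):

    def collect(self):
        # /v1/status/leader returns a JSON string such as "10.0.0.1:8300",
        # or "" when no leader is currently known.
        url = self.config['url'] + '/v1/status/leader'
        try:
            leader = json.load(urllib2.urlopen(url, timeout=5))
        except Exception:
            leader = ''
        # 1 when the agent sees a raft leader, 0 otherwise.
        self.publish('leader_present', 1 if leader else 0)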
6c804529f1c7f6a724b757c380e58ef493e787ea
CodeFights/createArray.py
CodeFights/createArray.py
#!/usr/local/bin/python # Code Fights Create Array Problem def createArray(size): return [1] * size def main(): tests = [ [4, [1, 1, 1, 1]], [2, [1, 1]], [1, [1]], [5, [1, 1, 1, 1, 1]] ] for t in tests: res = createArray(t[0]) ans = t[1] if ans == res: print("PASSED: createArray({}) returned {}" .format(t[0], res)) else: print("FAILED: createArray({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights create array problem
Solve Code Fights create array problem
Python
mit
HKuz/Test_Code
Solve Code Fights create array problem
#!/usr/local/bin/python # Code Fights Create Array Problem def createArray(size): return [1] * size def main(): tests = [ [4, [1, 1, 1, 1]], [2, [1, 1]], [1, [1]], [5, [1, 1, 1, 1, 1]] ] for t in tests: res = createArray(t[0]) ans = t[1] if ans == res: print("PASSED: createArray({}) returned {}" .format(t[0], res)) else: print("FAILED: createArray({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights create array problem<commit_after>
#!/usr/local/bin/python # Code Fights Create Array Problem def createArray(size): return [1] * size def main(): tests = [ [4, [1, 1, 1, 1]], [2, [1, 1]], [1, [1]], [5, [1, 1, 1, 1, 1]] ] for t in tests: res = createArray(t[0]) ans = t[1] if ans == res: print("PASSED: createArray({}) returned {}" .format(t[0], res)) else: print("FAILED: createArray({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights create array problem#!/usr/local/bin/python # Code Fights Create Array Problem def createArray(size): return [1] * size def main(): tests = [ [4, [1, 1, 1, 1]], [2, [1, 1]], [1, [1]], [5, [1, 1, 1, 1, 1]] ] for t in tests: res = createArray(t[0]) ans = t[1] if ans == res: print("PASSED: createArray({}) returned {}" .format(t[0], res)) else: print("FAILED: createArray({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights create array problem<commit_after>#!/usr/local/bin/python # Code Fights Create Array Problem def createArray(size): return [1] * size def main(): tests = [ [4, [1, 1, 1, 1]], [2, [1, 1]], [1, [1]], [5, [1, 1, 1, 1, 1]] ] for t in tests: res = createArray(t[0]) ans = t[1] if ans == res: print("PASSED: createArray({}) returned {}" .format(t[0], res)) else: print("FAILED: createArray({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
72806f06d08ceeae2c26198afdccf39832fb1c43
ms/storage/generaluuid.py
ms/storage/generaluuid.py
""" Copyright 2015 The Trustees of Princeton University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random from google.appengine.api import memcache from google.appengine.ext import ndb import backends as backend UUID_KEY_TEMPLATE = 'uuid-{}-{}' class GeneralUUID(ndb.Model): """ Shards for each named counter. """ uuid = ndb.StringProperty(default="") def __uuid_key_names( uuids, namespace ): key_strings = [UUID_KEY_TEMPLATE.format(uuid, namespace) for uuid in uuids] return key_strings @ndb.tasklet def get_uuids_async( uuids, namespace, use_memcache=True ): """ Get all UUIDs under a namespace """ do_cache = False all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=use_memcache ) # NOTE: uuid_data is ordered by uuids raise ndb.Return( [uuid.uuid if uuid is not None else None for uuid in uuid_data] ) def get_uuids( uuids, namespace, use_memcache=True ): uuid_data_fut = get_uuids_async( uuids, namespace, use_memcache ) return uuid_data_fut.get_result() @ndb.tasklet def put_uuids_async( uuids, namespace ): """ Put UUIDs to a namespace """ all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ] put_keys = yield ndb.put_multi_async( uuid_data ) raise ndb.Return( put_keys )
Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls.
Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls.
Python
apache-2.0
iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate
Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls.
""" Copyright 2015 The Trustees of Princeton University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random from google.appengine.api import memcache from google.appengine.ext import ndb import backends as backend UUID_KEY_TEMPLATE = 'uuid-{}-{}' class GeneralUUID(ndb.Model): """ Shards for each named counter. """ uuid = ndb.StringProperty(default="") def __uuid_key_names( uuids, namespace ): key_strings = [UUID_KEY_TEMPLATE.format(uuid, namespace) for uuid in uuids] return key_strings @ndb.tasklet def get_uuids_async( uuids, namespace, use_memcache=True ): """ Get all UUIDs under a namespace """ do_cache = False all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=use_memcache ) # NOTE: uuid_data is ordered by uuids raise ndb.Return( [uuid.uuid if uuid is not None else None for uuid in uuid_data] ) def get_uuids( uuids, namespace, use_memcache=True ): uuid_data_fut = get_uuids_async( uuids, namespace, use_memcache ) return uuid_data_fut.get_result() @ndb.tasklet def put_uuids_async( uuids, namespace ): """ Put UUIDs to a namespace """ all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ] put_keys = yield ndb.put_multi_async( uuid_data ) raise ndb.Return( put_keys )
<commit_before><commit_msg>Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls.<commit_after>
""" Copyright 2015 The Trustees of Princeton University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import random from google.appengine.api import memcache from google.appengine.ext import ndb import backends as backend UUID_KEY_TEMPLATE = 'uuid-{}-{}' class GeneralUUID(ndb.Model): """ Shards for each named counter. """ uuid = ndb.StringProperty(default="") def __uuid_key_names( uuids, namespace ): key_strings = [UUID_KEY_TEMPLATE.format(uuid, namespace) for uuid in uuids] return key_strings @ndb.tasklet def get_uuids_async( uuids, namespace, use_memcache=True ): """ Get all UUIDs under a namespace """ do_cache = False all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=use_memcache ) # NOTE: uuid_data is ordered by uuids raise ndb.Return( [uuid.uuid if uuid is not None else None for uuid in uuid_data] ) def get_uuids( uuids, namespace, use_memcache=True ): uuid_data_fut = get_uuids_async( uuids, namespace, use_memcache ) return uuid_data_fut.get_result() @ndb.tasklet def put_uuids_async( uuids, namespace ): """ Put UUIDs to a namespace """ all_key_names = __uuid_key_names( uuids, namespace ) all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names] uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ] put_keys = yield ndb.put_multi_async( uuid_data ) raise ndb.Return( put_keys )
Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls."""
   Copyright 2015 The Trustees of Princeton University

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

import random

from google.appengine.api import memcache
from google.appengine.ext import ndb

import backends as backend

UUID_KEY_TEMPLATE = 'uuid-{}-{}'

class GeneralUUID(ndb.Model):
    """
    A UUID recorded under a namespace.
    """
    uuid = ndb.StringProperty(default="")

def __uuid_key_names( uuids, namespace ):
    key_strings = [UUID_KEY_TEMPLATE.format(uuid, namespace) for uuid in uuids]
    return key_strings

@ndb.tasklet
def get_uuids_async( uuids, namespace, use_memcache=True ):
    """
    Look up the given UUIDs under a namespace (None for any that are absent).
    """
    all_key_names = __uuid_key_names( uuids, namespace )
    all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names]

    uuid_data = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=use_memcache )

    # NOTE: uuid_data is ordered by uuids
    raise ndb.Return( [uuid.uuid if uuid is not None else None for uuid in uuid_data] )

def get_uuids( uuids, namespace, use_memcache=True ):
    uuid_data_fut = get_uuids_async( uuids, namespace, use_memcache )
    return uuid_data_fut.get_result()

@ndb.tasklet
def put_uuids_async( uuids, namespace ):
    """
    Put UUIDs to a namespace
    """
    all_key_names = __uuid_key_names( uuids, namespace )
    all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names]

    uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ]

    put_keys = yield ndb.put_multi_async( uuid_data )

    raise ndb.Return( put_keys )
<commit_before><commit_msg>Store namespaced UUIDs, i.e., to prevent replay attacks on JSON RPC calls.<commit_after>"""
   Copyright 2015 The Trustees of Princeton University

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
"""

import random

from google.appengine.api import memcache
from google.appengine.ext import ndb

import backends as backend

UUID_KEY_TEMPLATE = 'uuid-{}-{}'

class GeneralUUID(ndb.Model):
    """
    A UUID recorded under a namespace.
    """
    uuid = ndb.StringProperty(default="")

def __uuid_key_names( uuids, namespace ):
    key_strings = [UUID_KEY_TEMPLATE.format(uuid, namespace) for uuid in uuids]
    return key_strings

@ndb.tasklet
def get_uuids_async( uuids, namespace, use_memcache=True ):
    """
    Look up the given UUIDs under a namespace (None for any that are absent).
    """
    all_key_names = __uuid_key_names( uuids, namespace )
    all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names]

    uuid_data = yield ndb.get_multi_async( all_keys, use_cache=False, use_memcache=use_memcache )

    # NOTE: uuid_data is ordered by uuids
    raise ndb.Return( [uuid.uuid if uuid is not None else None for uuid in uuid_data] )

def get_uuids( uuids, namespace, use_memcache=True ):
    uuid_data_fut = get_uuids_async( uuids, namespace, use_memcache )
    return uuid_data_fut.get_result()

@ndb.tasklet
def put_uuids_async( uuids, namespace ):
    """
    Put UUIDs to a namespace
    """
    all_key_names = __uuid_key_names( uuids, namespace )
    all_keys = [ndb.Key( GeneralUUID, key_name ) for key_name in all_key_names]

    uuid_data = [ GeneralUUID( key=key, uuid=uuid ) for (key, uuid) in zip( all_keys, uuids ) ]

    put_keys = yield ndb.put_multi_async( uuid_data )

    raise ndb.Return( put_keys )
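A sketch of the replay-guard pattern this module is meant for, using only the functions defined above; the import path and namespace string are illustrative:

import generaluuid  # hypothetical import path


def check_and_record(uuid, namespace='jsonrpc'):
    # Reject a UUID already seen in this namespace, otherwise record it.
    if generaluuid.get_uuids([uuid], namespace)[0] is not None:
        raise ValueError('Replayed UUID: {}'.format(uuid))
    generaluuid.put_uuids_async([uuid], namespace).get_result()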
ebd72b546500a9556e1df99768aa200cf8860628
happy_plant.py
happy_plant.py
from microbit import * import math KNOWN_RES = 1181 V_IN = 3 R_BAD = 900 R_OK = 700 BAD = ( (9, 9, 0, 9, 9), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (0, 9, 9, 9, 0), (9, 0, 0, 0, 9), ) OK = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 9, 9, 9, 9), (0, 0, 0, 0, 0), ) GOOD = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 0, 0, 0, 9), (0, 9, 9, 9, 0), ) def to_display(matrix): for y, row in enumerate(matrix): for x, col in enumerate(row): display.set_pixel(x, y, col) def get_res(): pin_val = pin0.read_analog() v_out = (V_IN * pin_val) / 1023 return math.fabs(KNOWN_RES * (1 / (V_IN / v_out) - 1)) while True: res = get_res() print(res) if res > R_BAD: to_display(BAD) elif res > R_OK: to_display(OK) else: to_display(GOOD) print(res) sleep(500)
Add plant happiness detector (moisture meter)
Add plant happiness detector (moisture meter) Essentially an Ohm-meter with feelings.
Python
apache-2.0
Geekfish/microbit-week
Add plant happiness detector (moisture meter) Essentially an Ohm-meter with feelings.
from microbit import * import math KNOWN_RES = 1181 V_IN = 3 R_BAD = 900 R_OK = 700 BAD = ( (9, 9, 0, 9, 9), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (0, 9, 9, 9, 0), (9, 0, 0, 0, 9), ) OK = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 9, 9, 9, 9), (0, 0, 0, 0, 0), ) GOOD = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 0, 0, 0, 9), (0, 9, 9, 9, 0), ) def to_display(matrix): for y, row in enumerate(matrix): for x, col in enumerate(row): display.set_pixel(x, y, col) def get_res(): pin_val = pin0.read_analog() v_out = (V_IN * pin_val) / 1023 return math.fabs(KNOWN_RES * (1 / (V_IN / v_out) - 1)) while True: res = get_res() print(res) if res > R_BAD: to_display(BAD) elif res > R_OK: to_display(OK) else: to_display(GOOD) print(res) sleep(500)
<commit_before><commit_msg>Add plant happiness detector (moisture meter) Essentially an Ohm-meter with feelings.<commit_after>
from microbit import * import math KNOWN_RES = 1181 V_IN = 3 R_BAD = 900 R_OK = 700 BAD = ( (9, 9, 0, 9, 9), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (0, 9, 9, 9, 0), (9, 0, 0, 0, 9), ) OK = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 9, 9, 9, 9), (0, 0, 0, 0, 0), ) GOOD = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 0, 0, 0, 9), (0, 9, 9, 9, 0), ) def to_display(matrix): for y, row in enumerate(matrix): for x, col in enumerate(row): display.set_pixel(x, y, col) def get_res(): pin_val = pin0.read_analog() v_out = (V_IN * pin_val) / 1023 return math.fabs(KNOWN_RES * (1 / (V_IN / v_out) - 1)) while True: res = get_res() print(res) if res > R_BAD: to_display(BAD) elif res > R_OK: to_display(OK) else: to_display(GOOD) print(res) sleep(500)
Add plant happiness detector (moisture meter) Essentially an Ohm-meter with feelings.from microbit import * import math KNOWN_RES = 1181 V_IN = 3 R_BAD = 900 R_OK = 700 BAD = ( (9, 9, 0, 9, 9), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (0, 9, 9, 9, 0), (9, 0, 0, 0, 9), ) OK = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 9, 9, 9, 9), (0, 0, 0, 0, 0), ) GOOD = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 0, 0, 0, 9), (0, 9, 9, 9, 0), ) def to_display(matrix): for y, row in enumerate(matrix): for x, col in enumerate(row): display.set_pixel(x, y, col) def get_res(): pin_val = pin0.read_analog() v_out = (V_IN * pin_val) / 1023 return math.fabs(KNOWN_RES * (1 / (V_IN / v_out) - 1)) while True: res = get_res() print(res) if res > R_BAD: to_display(BAD) elif res > R_OK: to_display(OK) else: to_display(GOOD) print(res) sleep(500)
<commit_before><commit_msg>Add plant happiness detector (moisture meter) Essentially an Ohm-meter with feelings.<commit_after>from microbit import * import math KNOWN_RES = 1181 V_IN = 3 R_BAD = 900 R_OK = 700 BAD = ( (9, 9, 0, 9, 9), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (0, 9, 9, 9, 0), (9, 0, 0, 0, 9), ) OK = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 9, 9, 9, 9), (0, 0, 0, 0, 0), ) GOOD = ( (0, 9, 0, 9, 0), (0, 9, 0, 9, 0), (0, 0, 0, 0, 0), (9, 0, 0, 0, 9), (0, 9, 9, 9, 0), ) def to_display(matrix): for y, row in enumerate(matrix): for x, col in enumerate(row): display.set_pixel(x, y, col) def get_res(): pin_val = pin0.read_analog() v_out = (V_IN * pin_val) / 1023 return math.fabs(KNOWN_RES * (1 / (V_IN / v_out) - 1)) while True: res = get_res() print(res) if res > R_BAD: to_display(BAD) elif res > R_OK: to_display(OK) else: to_display(GOOD) print(res) sleep(500)
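Working one reading through get_res() above, with no assumptions beyond the constants in the file, a mid-scale ADC value lands in the GOOD band:

# Same arithmetic as get_res(), evaluated for pin_val = 512:
KNOWN_RES, V_IN, pin_val = 1181, 3, 512
v_out = (V_IN * pin_val) / 1023                    # ~1.50 V
res = abs(KNOWN_RES * (1 / (V_IN / v_out) - 1))    # ~590 ohms
# 590 < R_OK (700), so the display shows the GOOD (smiling) face.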
f25f9bd3d33e04d7bcb6b8f4f655e18214bd95ad
src/armet/tests/test_response.py
src/armet/tests/test_response.py
# -*- coding: utf-8 -*- from hashlib import md5 from armet.utils import test class ResponseTestCase(test.TestCase): def setUp(self): super(ResponseTestCase, self).setUp() self.endpoint = '/' def test_content_md5(self): # Check some random endpoint endpoint = '{}choice/1'.format(self.endpoint) response = self.client.get(endpoint) # Assert we got a Content-MD5 header self.assertTrue(response.has_header('Content-MD5')) # Make an MD5 of the body. md5_body = md5(response.content).hexdigest() # Assert the MD5 is correct. self.assertEqual(response['Content-MD5'], md5_body)
Test case for Content-MD5 response.
Test case for Content-MD5 response.
Python
mit
armet/python-armet
Test case for Content-MD5 response.
# -*- coding: utf-8 -*- from hashlib import md5 from armet.utils import test class ResponseTestCase(test.TestCase): def setUp(self): super(ResponseTestCase, self).setUp() self.endpoint = '/' def test_content_md5(self): # Check some random endpoint endpoint = '{}choice/1'.format(self.endpoint) response = self.client.get(endpoint) # Assert we got a Content-MD5 header self.assertTrue(response.has_header('Content-MD5')) # Make an MD5 of the body. md5_body = md5(response.content).hexdigest() # Assert the MD5 is correct. self.assertEqual(response['Content-MD5'], md5_body)
<commit_before><commit_msg>Test case for Content-MD5 response.<commit_after>
# -*- coding: utf-8 -*- from hashlib import md5 from armet.utils import test class ResponseTestCase(test.TestCase): def setUp(self): super(ResponseTestCase, self).setUp() self.endpoint = '/' def test_content_md5(self): # Check some random endpoint endpoint = '{}choice/1'.format(self.endpoint) response = self.client.get(endpoint) # Assert we got a Content-MD5 header self.assertTrue(response.has_header('Content-MD5')) # Make an MD5 of the body. md5_body = md5(response.content).hexdigest() # Assert the MD5 is correct. self.assertEqual(response['Content-MD5'], md5_body)
Test case for Content-MD5 response.# -*- coding: utf-8 -*- from hashlib import md5 from armet.utils import test class ResponseTestCase(test.TestCase): def setUp(self): super(ResponseTestCase, self).setUp() self.endpoint = '/' def test_content_md5(self): # Check some random endpoint endpoint = '{}choice/1'.format(self.endpoint) response = self.client.get(endpoint) # Assert we got a Content-MD5 header self.assertTrue(response.has_header('Content-MD5')) # Make an MD5 of the body. md5_body = md5(response.content).hexdigest() # Assert the MD5 is correct. self.assertEqual(response['Content-MD5'], md5_body)
<commit_before><commit_msg>Test case for Content-MD5 response.<commit_after># -*- coding: utf-8 -*- from hashlib import md5 from armet.utils import test class ResponseTestCase(test.TestCase): def setUp(self): super(ResponseTestCase, self).setUp() self.endpoint = '/' def test_content_md5(self): # Check some random endpoint endpoint = '{}choice/1'.format(self.endpoint) response = self.client.get(endpoint) # Assert we got a Content-MD5 header self.assertTrue(response.has_header('Content-MD5')) # Make an MD5 of the body. md5_body = md5(response.content).hexdigest() # Assert the MD5 is correct. self.assertEqual(response['Content-MD5'], md5_body)
8c5736cfb09250537d6f6a48f9f73e878444d5b0
promgen/management/commands/queuecheck.py
promgen/management/commands/queuecheck.py
import logging import platform from celery import group from django.conf import settings from django.core.management.base import BaseCommand from promgen.celery import debug_task logger = logging.getLogger(__name__) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--timeout', default=10, type=int) def handle(self, **kwargs): results = [] # Test individual Prometheus queues for host in settings.PROMGEN['prometheus'].get('servers'): queue, _ = host.split(':') logger.debug('Queueing URLs on %s', queue) results.append(debug_task.signature(queue=queue)) # Test queue for current server results.append(debug_task.signature(queue=platform.node())) # Get the result of all of our debug tasks group(results)().get(timeout=kwargs['timeout'])
Check to see if the queues are blocked
Check to see if the queues are blocked
Python
mit
kfdm/promgen,kfdm/promgen,kfdm/promgen,kfdm/promgen
Check to see if the queues are blocked
import logging import platform from celery import group from django.conf import settings from django.core.management.base import BaseCommand from promgen.celery import debug_task logger = logging.getLogger(__name__) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--timeout', default=10, type=int) def handle(self, **kwargs): results = [] # Test individual Prometheus queues for host in settings.PROMGEN['prometheus'].get('servers'): queue, _ = host.split(':') logger.debug('Queueing URLs on %s', queue) results.append(debug_task.signature(queue=queue)) # Test queue for current server results.append(debug_task.signature(queue=platform.node())) # Get the result of all of our debug tasks group(results)().get(timeout=kwargs['timeout'])
<commit_before><commit_msg>Check to see if the queues are blocked<commit_after>
import logging import platform from celery import group from django.conf import settings from django.core.management.base import BaseCommand from promgen.celery import debug_task logger = logging.getLogger(__name__) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--timeout', default=10, type=int) def handle(self, **kwargs): results = [] # Test individual Prometheus queues for host in settings.PROMGEN['prometheus'].get('servers'): queue, _ = host.split(':') logger.debug('Queueing URLs on %s', queue) results.append(debug_task.signature(queue=queue)) # Test queue for current server results.append(debug_task.signature(queue=platform.node())) # Get the result of all of our debug tasks group(results)().get(timeout=kwargs['timeout'])
Check to see if the queues are blockedimport logging import platform from celery import group from django.conf import settings from django.core.management.base import BaseCommand from promgen.celery import debug_task logger = logging.getLogger(__name__) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--timeout', default=10, type=int) def handle(self, **kwargs): results = [] # Test individual Prometheus queues for host in settings.PROMGEN['prometheus'].get('servers'): queue, _ = host.split(':') logger.debug('Queueing URLs on %s', queue) results.append(debug_task.signature(queue=queue)) # Test queue for current server results.append(debug_task.signature(queue=platform.node())) # Get the result of all of our debug tasks group(results)().get(timeout=kwargs['timeout'])
<commit_before><commit_msg>Check to see if the queues are blocked<commit_after>import logging import platform from celery import group from django.conf import settings from django.core.management.base import BaseCommand from promgen.celery import debug_task logger = logging.getLogger(__name__) class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('--timeout', default=10, type=int) def handle(self, **kwargs): results = [] # Test individual Prometheus queues for host in settings.PROMGEN['prometheus'].get('servers'): queue, _ = host.split(':') logger.debug('Queueing URLs on %s', queue) results.append(debug_task.signature(queue=queue)) # Test queue for current server results.append(debug_task.signature(queue=platform.node())) # Get the result of all of our debug tasks group(results)().get(timeout=kwargs['timeout'])
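Because the file sits under promgen/management/commands/, Django registers it as a manage.py subcommand named after the module; one way to invoke it programmatically (the timeout value is arbitrary):

from django.core.management import call_command

# Equivalent to: python manage.py queuecheck --timeout 30
call_command('queuecheck', timeout=30)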
e0baa78241b73f977757823527394551d2e2059c
findatapy/util/swimpool.py
findatapy/util/swimpool.py
class SwimPool(object):

    def create_pool(self, thread_technique, thread_no):

        if thread_technique == "thread":
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessor":
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly

            # note: currently not very stable
            from multiprocessing_on_dill import Pool
            # from pathos.pools import ProcessPool as Pool
        else:
            raise ValueError('{} is not a recognized thread technique.'.format(thread_technique))

        return Pool(thread_no)
Add SwimPool wrapper for thread/multiprocessor
Add SwimPool wrapper for thread/multiprocessor
Python
apache-2.0
cuemacro/findatapy
Add SwimPool wrapper for thread/multiprocessor
class SwimPool(object):

    def create_pool(self, thread_technique, thread_no):

        if thread_technique == "thread":
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessor":
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly

            # note: currently not very stable
            from multiprocessing_on_dill import Pool
            # from pathos.pools import ProcessPool as Pool
        else:
            raise ValueError('{} is not a recognized thread technique.'.format(thread_technique))

        return Pool(thread_no)
<commit_before><commit_msg>Add SwimPool wrapper for thread/multiprocessor<commit_after>
class SwimPool(object):

    def create_pool(self, thread_technique, thread_no):

        if thread_technique == "thread":
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessor":
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly

            # note: currently not very stable
            from multiprocessing_on_dill import Pool
            # from pathos.pools import ProcessPool as Pool
        else:
            raise ValueError('{} is not a recognized thread technique.'.format(thread_technique))

        return Pool(thread_no)
Add SwimPool wrapper for thread/multiprocessorclass SwimPool(object):

    def create_pool(self, thread_technique, thread_no):

        if thread_technique == "thread":
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessor":
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly

            # note: currently not very stable
            from multiprocessing_on_dill import Pool
            # from pathos.pools import ProcessPool as Pool
        else:
            raise ValueError('{} is not a recognized thread technique.'.format(thread_technique))

        return Pool(thread_no)
<commit_before><commit_msg>Add SwimPool wrapper for thread/multiprocessor<commit_after>class SwimPool(object):

    def create_pool(self, thread_technique, thread_no):

        if thread_technique == "thread":
            from multiprocessing.dummy import Pool
        elif thread_technique == "multiprocessor":
            # most of the time is spent waiting for Bloomberg to return, so can use threads rather than multiprocessing
            # must use the multiprocessing_on_dill library otherwise can't pickle objects correctly

            # note: currently not very stable
            from multiprocessing_on_dill import Pool
            # from pathos.pools import ProcessPool as Pool
        else:
            raise ValueError('{} is not a recognized thread technique.'.format(thread_technique))

        return Pool(thread_no)
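A usage sketch for the wrapper above; the worker function and inputs are illustrative:

from findatapy.util.swimpool import SwimPool  # path inferred from the record's filename


def fetch(ticker):
    # Stand-in for an I/O-bound download call.
    return ticker.lower()


pool = SwimPool().create_pool('thread', 4)
results = pool.map(fetch, ['EURUSD', 'GBPUSD', 'USDJPY'])
pool.close()
pool.join()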
0866a865c48cc22219fe4c025d8e01f059c1eb2e
pyplotter.py
pyplotter.py
import argparse
import serial

try:
    import readline  # noqa
except ImportError:
    pass

parser = argparse.ArgumentParser(
    description='CNC Import tool for Makeblock XY Plotter')
parser.add_argument('device', help="Serial device to connect")
parser.add_argument('-f', '--file', help="File to send")
parser.add_argument('-b', '--baud', default=115200, type=int, help="Baud rate")


def main():
    arg = parser.parse_args()
    ser = serial.Serial(arg.device, arg.baud, timeout=1)
    wait_for(ser, "start")
    if not arg.file:
        while True:
            inp = raw_input("> ")
            ser.write(inp + "\n")  # raw_input strips the newline the firmware expects
    else:
        f = open(arg.file, 'r')
        for line in f:
            if line.rstrip() == "" or line.startswith("("):
                continue
            print "> %s" % line
            ser.write(line)
            wait_for(ser, "ok", "start")


def wait_for(ser, *args):
    line = ""
    while line not in args:
        line = ser.readline().rstrip()
        if line is not None and line != "":
            print "< %s" % line

if __name__ == "__main__":
    main()
Add script to import CNC files to plotter
Add script to import CNC files to plotter
Python
apache-2.0
mkoderer/pyplotter
Add script to import CNC files to plotter
import argparse
import serial

try:
    import readline  # noqa
except ImportError:
    pass

parser = argparse.ArgumentParser(
    description='CNC Import tool for Makeblock XY Plotter')
parser.add_argument('device', help="Serial device to connect")
parser.add_argument('-f', '--file', help="File to send")
parser.add_argument('-b', '--baud', default=115200, type=int, help="Baud rate")


def main():
    arg = parser.parse_args()
    ser = serial.Serial(arg.device, arg.baud, timeout=1)
    wait_for(ser, "start")
    if not arg.file:
        while True:
            inp = raw_input("> ")
            ser.write(inp + "\n")  # raw_input strips the newline the firmware expects
    else:
        f = open(arg.file, 'r')
        for line in f:
            if line.rstrip() == "" or line.startswith("("):
                continue
            print "> %s" % line
            ser.write(line)
            wait_for(ser, "ok", "start")


def wait_for(ser, *args):
    line = ""
    while line not in args:
        line = ser.readline().rstrip()
        if line is not None and line != "":
            print "< %s" % line

if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add script to import CNC files to plotter<commit_after>
import argparse
import serial

try:
    import readline  # noqa
except ImportError:
    pass

parser = argparse.ArgumentParser(
    description='CNC Import tool for Makeblock XY Plotter')
parser.add_argument('device', help="Serial device to connect")
parser.add_argument('-f', '--file', help="File to send")
parser.add_argument('-b', '--baud', default=115200, type=int, help="Baud rate")


def main():
    arg = parser.parse_args()
    ser = serial.Serial(arg.device, arg.baud, timeout=1)
    wait_for(ser, "start")
    if not arg.file:
        while True:
            inp = raw_input("> ")
            ser.write(inp + "\n")  # raw_input strips the newline the firmware expects
    else:
        f = open(arg.file, 'r')
        for line in f:
            if line.rstrip() == "" or line.startswith("("):
                continue
            print "> %s" % line
            ser.write(line)
            wait_for(ser, "ok", "start")


def wait_for(ser, *args):
    line = ""
    while line not in args:
        line = ser.readline().rstrip()
        if line is not None and line != "":
            print "< %s" % line

if __name__ == "__main__":
    main()
Add script to import CNC files to plotterimport argparse
import serial

try:
    import readline  # noqa
except ImportError:
    pass

parser = argparse.ArgumentParser(
    description='CNC Import tool for Makeblock XY Plotter')
parser.add_argument('device', help="Serial device to connect")
parser.add_argument('-f', '--file', help="File to send")
parser.add_argument('-b', '--baud', default=115200, type=int, help="Baud rate")


def main():
    arg = parser.parse_args()
    ser = serial.Serial(arg.device, arg.baud, timeout=1)
    wait_for(ser, "start")
    if not arg.file:
        while True:
            inp = raw_input("> ")
            ser.write(inp + "\n")  # raw_input strips the newline the firmware expects
    else:
        f = open(arg.file, 'r')
        for line in f:
            if line.rstrip() == "" or line.startswith("("):
                continue
            print "> %s" % line
            ser.write(line)
            wait_for(ser, "ok", "start")


def wait_for(ser, *args):
    line = ""
    while line not in args:
        line = ser.readline().rstrip()
        if line is not None and line != "":
            print "< %s" % line

if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add script to import CNC files to plotter<commit_after>import argparse
import serial

try:
    import readline  # noqa
except ImportError:
    pass

parser = argparse.ArgumentParser(
    description='CNC Import tool for Makeblock XY Plotter')
parser.add_argument('device', help="Serial device to connect")
parser.add_argument('-f', '--file', help="File to send")
parser.add_argument('-b', '--baud', default=115200, type=int, help="Baud rate")


def main():
    arg = parser.parse_args()
    ser = serial.Serial(arg.device, arg.baud, timeout=1)
    wait_for(ser, "start")
    if not arg.file:
        while True:
            inp = raw_input("> ")
            ser.write(inp + "\n")  # raw_input strips the newline the firmware expects
    else:
        f = open(arg.file, 'r')
        for line in f:
            if line.rstrip() == "" or line.startswith("("):
                continue
            print "> %s" % line
            ser.write(line)
            wait_for(ser, "ok", "start")


def wait_for(ser, *args):
    line = ""
    while line not in args:
        line = ser.readline().rstrip()
        if line is not None and line != "":
            print "< %s" % line

if __name__ == "__main__":
    main()
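Typical invocations of the script above (device path and file name are placeholders); the second form streams G-code and blocks on each line until the firmware answers "ok", per wait_for():

# python pyplotter.py /dev/ttyUSB0                   (interactive prompt)
# python pyplotter.py /dev/ttyUSB0 -f drawing.gcode  (stream a file)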
1404c37e1c593971c0a7e3020af12a762ff5ccc9
embed_all.py
embed_all.py
#!/usr/bin/env python3
import argparse
from importlib import import_module
from os.path import splitext, join as pjoin

import cv2
import numpy as np
import h5py

import lib
from lib.models import add_defaults


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Embed many images.')
    parser.add_argument('--basedir', default='.',
                        help='Path to the folder containing all images.')
    parser.add_argument('--outfile', default='embeddings.h5',
                        help='Name of the output hdf5 file in which to store the embeddings.')
    parser.add_argument('--model', default='lunet2',
                        help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
    parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2-final.pkl',
                        help='Name of the weights to load for the model (path to .pkl file).')
    parser.add_argument('--scale', default=1.0, type=float,
                        help='Scale factor to scale images before embedding them.')
    parser.add_argument('--t0', type=int)
    parser.add_argument('--t1', type=int)
    args = parser.parse_args()
    print(args)

    mod = import_module('lib.models.' + args.model)
    net = add_defaults(mod.mknet())
    try:
        net.load(args.weights)
    except ValueError:
        print("!!!!!!!THE WEIGHTS YOU LOADED DON'T BELONG TO THE MODEL YOU'RE USING!!!!!!")
        raise

    # Shares the weights, just replaces the avg-pooling layer.
    net_hires = mod.hires_shared_twin(net)
    net_hires.evaluate()

    if args.t0 is None or args.t1 is None:
        # Assumed to come from the local lib package; the bare name was undefined.
        all_files = lib.sane_listdir(args.basedir, sortkey=lambda f: int(splitext(f)[0]))
    else:
        all_files = ['{}.jpg'.format(i) for i in range(args.t0, args.t1+1)]

    print("Precompiling network...", end='', flush=True)
    img = lib.imread(pjoin(args.basedir, all_files[0]))
    img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
    out = net_hires.forward(img[None])
    print(" Done", flush=True)

    with h5py.File(args.outfile, 'w') as f_out:
        ds = f_out.create_dataset('embs', shape=(len(all_files),) + out.shape[1:], dtype=out.dtype)
        for i, fname in enumerate(all_files):
            print("\r{} ({}/{})".format(fname, i, len(all_files)), end='', flush=True)
            img = lib.imread(pjoin(args.basedir, fname))
            img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
            ds[i] = net_hires.forward(img[None])
            if i % 100 == 0:
                f_out.flush()
    print(" Done")
Add script to embed all images.
Add script to embed all images.
Python
mit
VisualComputingInstitute/towards-reid-tracking
Add script to embed all images.
#!/usr/bin/env python3
import argparse
from importlib import import_module
from os.path import splitext, join as pjoin

import cv2
import numpy as np
import h5py

import lib
from lib.models import add_defaults


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Embed many images.')
    parser.add_argument('--basedir', default='.',
                        help='Path to the folder containing all images.')
    parser.add_argument('--outfile', default='embeddings.h5',
                        help='Name of the output hdf5 file in which to store the embeddings.')
    parser.add_argument('--model', default='lunet2',
                        help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
    parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2-final.pkl',
                        help='Name of the weights to load for the model (path to .pkl file).')
    parser.add_argument('--scale', default=1.0, type=float,
                        help='Scale factor to scale images before embedding them.')
    parser.add_argument('--t0', type=int)
    parser.add_argument('--t1', type=int)
    args = parser.parse_args()
    print(args)

    mod = import_module('lib.models.' + args.model)
    net = add_defaults(mod.mknet())
    try:
        net.load(args.weights)
    except ValueError:
        print("!!!!!!!THE WEIGHTS YOU LOADED DON'T BELONG TO THE MODEL YOU'RE USING!!!!!!")
        raise

    # Shares the weights, just replaces the avg-pooling layer.
    net_hires = mod.hires_shared_twin(net)
    net_hires.evaluate()

    if args.t0 is None or args.t1 is None:
        # Assumed to come from the local lib package; the bare name was undefined.
        all_files = lib.sane_listdir(args.basedir, sortkey=lambda f: int(splitext(f)[0]))
    else:
        all_files = ['{}.jpg'.format(i) for i in range(args.t0, args.t1+1)]

    print("Precompiling network...", end='', flush=True)
    img = lib.imread(pjoin(args.basedir, all_files[0]))
    img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
    out = net_hires.forward(img[None])
    print(" Done", flush=True)

    with h5py.File(args.outfile, 'w') as f_out:
        ds = f_out.create_dataset('embs', shape=(len(all_files),) + out.shape[1:], dtype=out.dtype)
        for i, fname in enumerate(all_files):
            print("\r{} ({}/{})".format(fname, i, len(all_files)), end='', flush=True)
            img = lib.imread(pjoin(args.basedir, fname))
            img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
            ds[i] = net_hires.forward(img[None])
            if i % 100 == 0:
                f_out.flush()
    print(" Done")
<commit_before><commit_msg>Add script to embed all images.<commit_after>
#!/usr/bin/env python3
import argparse
from importlib import import_module
from os.path import splitext, join as pjoin

import cv2
import numpy as np
import h5py

import lib
from lib.models import add_defaults


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Embed many images.')
    parser.add_argument('--basedir', default='.',
                        help='Path to the folder containing all images.')
    parser.add_argument('--outfile', default='embeddings.h5',
                        help='Name of the output hdf5 file in which to store the embeddings.')
    parser.add_argument('--model', default='lunet2',
                        help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
    parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2-final.pkl',
                        help='Name of the weights to load for the model (path to .pkl file).')
    parser.add_argument('--scale', default=1.0, type=float,
                        help='Scale factor to scale images before embedding them.')
    parser.add_argument('--t0', type=int)
    parser.add_argument('--t1', type=int)
    args = parser.parse_args()
    print(args)

    mod = import_module('lib.models.' + args.model)
    net = add_defaults(mod.mknet())
    try:
        net.load(args.weights)
    except ValueError:
        print("!!!!!!!THE WEIGHTS YOU LOADED DON'T BELONG TO THE MODEL YOU'RE USING!!!!!!")
        raise

    # Shares the weights, just replaces the avg-pooling layer.
    net_hires = mod.hires_shared_twin(net)
    net_hires.evaluate()

    if args.t0 is None or args.t1 is None:
        # sane_listdir is assumed to be provided by lib, like imread/img2df below.
        all_files = lib.sane_listdir(args.basedir, sortkey=lambda f: int(splitext(f)[0]))
    else:
        all_files = ['{}.jpg'.format(i) for i in range(args.t0, args.t1+1)]

    print("Precompiling network...", end='', flush=True)
    img = lib.imread(pjoin(args.basedir, all_files[0]))
    img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
    out = net_hires.forward(img[None])
    print(" Done", flush=True)

    with h5py.File(args.outfile, 'w') as f_out:
        ds = f_out.create_dataset('embs', shape=(len(all_files),) + out.shape[1:], dtype=out.dtype)
        for i, fname in enumerate(all_files):
            print("\r{} ({}/{})".format(fname, i, len(all_files)), end='', flush=True)
            img = lib.imread(pjoin(args.basedir, fname))
            img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
            ds[i] = net_hires.forward(img[None])
            if i % 100 == 0:
                f_out.flush()
    print(" Done")
Add script to embed all images.#!/usr/bin/env python3
import argparse
from importlib import import_module
from os.path import splitext, join as pjoin

import cv2
import numpy as np
import h5py

import lib
from lib.models import add_defaults


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Embed many images.')
    parser.add_argument('--basedir', default='.',
                        help='Path to the folder containing all images.')
    parser.add_argument('--outfile', default='embeddings.h5',
                        help='Name of the output hdf5 file in which to store the embeddings.')
    parser.add_argument('--model', default='lunet2',
                        help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
    parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2-final.pkl',
                        help='Name of the weights to load for the model (path to .pkl file).')
    parser.add_argument('--scale', default=1.0, type=float,
                        help='Scale factor to scale images before embedding them.')
    parser.add_argument('--t0', type=int)
    parser.add_argument('--t1', type=int)
    args = parser.parse_args()
    print(args)

    mod = import_module('lib.models.' + args.model)
    net = add_defaults(mod.mknet())
    try:
        net.load(args.weights)
    except ValueError:
        print("!!!!!!!THE WEIGHTS YOU LOADED DON'T BELONG TO THE MODEL YOU'RE USING!!!!!!")
        raise

    # Shares the weights, just replaces the avg-pooling layer.
    net_hires = mod.hires_shared_twin(net)
    net_hires.evaluate()

    if args.t0 is None or args.t1 is None:
        # sane_listdir is assumed to be provided by lib, like imread/img2df below.
        all_files = lib.sane_listdir(args.basedir, sortkey=lambda f: int(splitext(f)[0]))
    else:
        all_files = ['{}.jpg'.format(i) for i in range(args.t0, args.t1+1)]

    print("Precompiling network...", end='', flush=True)
    img = lib.imread(pjoin(args.basedir, all_files[0]))
    img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
    out = net_hires.forward(img[None])
    print(" Done", flush=True)

    with h5py.File(args.outfile, 'w') as f_out:
        ds = f_out.create_dataset('embs', shape=(len(all_files),) + out.shape[1:], dtype=out.dtype)
        for i, fname in enumerate(all_files):
            print("\r{} ({}/{})".format(fname, i, len(all_files)), end='', flush=True)
            img = lib.imread(pjoin(args.basedir, fname))
            img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
            ds[i] = net_hires.forward(img[None])
            if i % 100 == 0:
                f_out.flush()
    print(" Done")
<commit_before><commit_msg>Add script to embed all images.<commit_after>#!/usr/bin/env python3
import argparse
from importlib import import_module
from os.path import splitext, join as pjoin

import cv2
import numpy as np
import h5py

import lib
from lib.models import add_defaults


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Embed many images.')
    parser.add_argument('--basedir', default='.',
                        help='Path to the folder containing all images.')
    parser.add_argument('--outfile', default='embeddings.h5',
                        help='Name of the output hdf5 file in which to store the embeddings.')
    parser.add_argument('--model', default='lunet2',
                        help='Name of the model to load. Corresponds to module names in lib/models. Or `fake`')
    parser.add_argument('--weights', default='/work/breuers/dukeMTMC/models/lunet2-final.pkl',
                        help='Name of the weights to load for the model (path to .pkl file).')
    parser.add_argument('--scale', default=1.0, type=float,
                        help='Scale factor to scale images before embedding them.')
    parser.add_argument('--t0', type=int)
    parser.add_argument('--t1', type=int)
    args = parser.parse_args()
    print(args)

    mod = import_module('lib.models.' + args.model)
    net = add_defaults(mod.mknet())
    try:
        net.load(args.weights)
    except ValueError:
        print("!!!!!!!THE WEIGHTS YOU LOADED DON'T BELONG TO THE MODEL YOU'RE USING!!!!!!")
        raise

    # Shares the weights, just replaces the avg-pooling layer.
    net_hires = mod.hires_shared_twin(net)
    net_hires.evaluate()

    if args.t0 is None or args.t1 is None:
        # sane_listdir is assumed to be provided by lib, like imread/img2df below.
        all_files = lib.sane_listdir(args.basedir, sortkey=lambda f: int(splitext(f)[0]))
    else:
        all_files = ['{}.jpg'.format(i) for i in range(args.t0, args.t1+1)]

    print("Precompiling network...", end='', flush=True)
    img = lib.imread(pjoin(args.basedir, all_files[0]))
    img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
    out = net_hires.forward(img[None])
    print(" Done", flush=True)

    with h5py.File(args.outfile, 'w') as f_out:
        ds = f_out.create_dataset('embs', shape=(len(all_files),) + out.shape[1:], dtype=out.dtype)
        for i, fname in enumerate(all_files):
            print("\r{} ({}/{})".format(fname, i, len(all_files)), end='', flush=True)
            img = lib.imread(pjoin(args.basedir, fname))
            img = lib.img2df(img, lib.scale_shape(img.shape, args.scale))
            ds[i] = net_hires.forward(img[None])
            if i % 100 == 0:
                f_out.flush()
    print(" Done")
7e25b5f85fa854df7265d0c73338556f2fd9dce1
src/scripts/detect_encoding.py
src/scripts/detect_encoding.py
import sys import os from chardet.universaldetector import UniversalDetector def report_encoding(path): file = open(path) detector = UniversalDetector() for line in file.readlines(): detector.feed(line) if detector.done: break detector.close() file.close() print detector.result["encoding"] def main(): if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]): report_encoding(sys.argv[1]) else: print "None" if __name__ == "__main__": main()
Add script to detect file encoding
Add script to detect file encoding
Python
mit
PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project,PinPinIre/Final-Year-Project
Add script to detect file encoding
import sys import os from chardet.universaldetector import UniversalDetector def report_encoding(path): file = open(path) detector = UniversalDetector() for line in file.readlines(): detector.feed(line) if detector.done: break detector.close() file.close() print detector.result["encoding"] def main(): if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]): report_encoding(sys.argv[1]) else: print "None" if __name__ == "__main__": main()
<commit_before><commit_msg>Add script to detect file encoding<commit_after>
import sys import os from chardet.universaldetector import UniversalDetector def report_encoding(path): file = open(path) detector = UniversalDetector() for line in file.readlines(): detector.feed(line) if detector.done: break detector.close() file.close() print detector.result["encoding"] def main(): if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]): report_encoding(sys.argv[1]) else: print "None" if __name__ == "__main__": main()
Add script to detect file encodingimport sys import os from chardet.universaldetector import UniversalDetector def report_encoding(path): file = open(path) detector = UniversalDetector() for line in file.readlines(): detector.feed(line) if detector.done: break detector.close() file.close() print detector.result["encoding"] def main(): if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]): report_encoding(sys.argv[1]) else: print "None" if __name__ == "__main__": main()
<commit_before><commit_msg>Add script to detect file encoding<commit_after>import sys import os from chardet.universaldetector import UniversalDetector def report_encoding(path): file = open(path) detector = UniversalDetector() for line in file.readlines(): detector.feed(line) if detector.done: break detector.close() file.close() print detector.result["encoding"] def main(): if len(sys.argv) > 1 and os.path.isfile(sys.argv[1]): report_encoding(sys.argv[1]) else: print "None" if __name__ == "__main__": main()
b0d22b76bdb4f695a3d1c3e8306523df1c538b67
test/units/TestModules.py
test/units/TestModules.py
# -*- coding: utf-8 -*-

import os
import ast
import unittest

from ansible import utils


class TestModules(unittest.TestCase):

    def list_all_modules(self):
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    module_list.append(os.path.join(dirpath, filename))
        return module_list

    def test_ast_parse(self):
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
Add an ast.parse unit test for modules to simulate ansible-doc
Add an ast.parse unit test for modules to simulate ansible-doc
Python
mit
thaim/ansible,thaim/ansible
Add an ast.parse unit test for modules to simulate ansible-doc
# -*- coding: utf-8 -*-

import os
import ast
import unittest

from ansible import utils


class TestModules(unittest.TestCase):

    def list_all_modules(self):
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    module_list.append(os.path.join(dirpath, filename))
        return module_list

    def test_ast_parse(self):
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
<commit_before><commit_msg>Add an ast.parse unit test for modules to simulate ansible-doc<commit_after>
# -*- coding: utf-8 -*-

import os
import ast
import unittest

from ansible import utils


class TestModules(unittest.TestCase):

    def list_all_modules(self):
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    module_list.append(os.path.join(dirpath, filename))
        return module_list

    def test_ast_parse(self):
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
Add an ast.parse unit test for modules to simulate ansible-doc# -*- coding: utf-8 -*-

import os
import ast
import unittest

from ansible import utils


class TestModules(unittest.TestCase):

    def list_all_modules(self):
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    module_list.append(os.path.join(dirpath, filename))
        return module_list

    def test_ast_parse(self):
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
<commit_before><commit_msg>Add an ast.parse unit test for modules to simulate ansible-doc<commit_after># -*- coding: utf-8 -*-

import os
import ast
import unittest

from ansible import utils


class TestModules(unittest.TestCase):

    def list_all_modules(self):
        paths = utils.plugins.module_finder._get_paths()
        paths = [x for x in paths if os.path.isdir(x)]
        module_list = []
        for path in paths:
            for (dirpath, dirnames, filenames) in os.walk(path):
                for filename in filenames:
                    module_list.append(os.path.join(dirpath, filename))
        return module_list

    def test_ast_parse(self):
        module_list = self.list_all_modules()
        ERRORS = []
        # attempt to parse each module with ast
        for m in module_list:
            try:
                ast.parse(''.join(open(m)))
            except Exception, e:
                ERRORS.append((m, e))
        assert len(ERRORS) == 0, "ast.parse errors: %s" % ERRORS
6ee465629a6a531ab29a664d3142a9005bdbb5cc
malaria24/ona/migrations/0025_smsevent.py
malaria24/ona/migrations/0025_smsevent.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ona', '0024_inboundsms'), ] operations = [ migrations.CreateModel( name='SMSEvent', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('event_type', models.CharField(max_length=255)), ('timestamp', models.DateTimeField()), ('sms', models.ForeignKey(to='ona.SMS')), ], ), ]
Add migration for sms event model
Add migration for sms event model
Python
bsd-2-clause
praekelt/malaria24-django,praekelt/malaria24-django,praekelt/malaria24-django,praekelt/malaria24-django
Add migration for sms event model
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ona', '0024_inboundsms'), ] operations = [ migrations.CreateModel( name='SMSEvent', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('event_type', models.CharField(max_length=255)), ('timestamp', models.DateTimeField()), ('sms', models.ForeignKey(to='ona.SMS')), ], ), ]
<commit_before><commit_msg>Add migration for sms event model<commit_after>
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ona', '0024_inboundsms'), ] operations = [ migrations.CreateModel( name='SMSEvent', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('event_type', models.CharField(max_length=255)), ('timestamp', models.DateTimeField()), ('sms', models.ForeignKey(to='ona.SMS')), ], ), ]
Add migration for sms event model# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ona', '0024_inboundsms'), ] operations = [ migrations.CreateModel( name='SMSEvent', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('event_type', models.CharField(max_length=255)), ('timestamp', models.DateTimeField()), ('sms', models.ForeignKey(to='ona.SMS')), ], ), ]
<commit_before><commit_msg>Add migration for sms event model<commit_after># -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('ona', '0024_inboundsms'), ] operations = [ migrations.CreateModel( name='SMSEvent', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('event_type', models.CharField(max_length=255)), ('timestamp', models.DateTimeField()), ('sms', models.ForeignKey(to='ona.SMS')), ], ), ]
bcb3b6879b70160598b98e87f0cf0b1b6aa8e1f1
scripts/dump_timestamp.py
scripts/dump_timestamp.py
#!/usr/bin/env python from pexif import JpegFile import sys usage = """Usage: dump_timestamp.py filename.jpg""" if len(sys.argv) != 2: print >> sys.stderr, usage sys.exit(1) try: ef = JpegFile.fromFile(sys.argv[1]) primary = ef.get_exif().get_primary() print "Primary DateTime :", primary.DateTime print "Extended DateTimeOriginal :", primary.ExtendedEXIF.DateTimeOriginal print "Extended DateTimeDigitized:", primary.ExtendedEXIF.DateTimeDigitized except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value except JpegFile.InvalidFile: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value
Add example of extracting various date/times.
Add example of extracting various date/times.
Python
mit
ebrelsford/pexif,untereiner/pexif,bennoleslie/pexif
Add example of extracting various date/times.
#!/usr/bin/env python from pexif import JpegFile import sys usage = """Usage: dump_timestamp.py filename.jpg""" if len(sys.argv) != 2: print >> sys.stderr, usage sys.exit(1) try: ef = JpegFile.fromFile(sys.argv[1]) primary = ef.get_exif().get_primary() print "Primary DateTime :", primary.DateTime print "Extended DateTimeOriginal :", primary.ExtendedEXIF.DateTimeOriginal print "Extended DateTimeDigitized:", primary.ExtendedEXIF.DateTimeDigitized except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value except JpegFile.InvalidFile: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value
<commit_before><commit_msg>Add example of extracting various date/times.<commit_after>
#!/usr/bin/env python from pexif import JpegFile import sys usage = """Usage: dump_timestamp.py filename.jpg""" if len(sys.argv) != 2: print >> sys.stderr, usage sys.exit(1) try: ef = JpegFile.fromFile(sys.argv[1]) primary = ef.get_exif().get_primary() print "Primary DateTime :", primary.DateTime print "Extended DateTimeOriginal :", primary.ExtendedEXIF.DateTimeOriginal print "Extended DateTimeDigitized:", primary.ExtendedEXIF.DateTimeDigitized except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value except JpegFile.InvalidFile: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value
Add example of extracting various date/times.#!/usr/bin/env python from pexif import JpegFile import sys usage = """Usage: dump_timestamp.py filename.jpg""" if len(sys.argv) != 2: print >> sys.stderr, usage sys.exit(1) try: ef = JpegFile.fromFile(sys.argv[1]) primary = ef.get_exif().get_primary() print "Primary DateTime :", primary.DateTime print "Extended DateTimeOriginal :", primary.ExtendedEXIF.DateTimeOriginal print "Extended DateTimeDigitized:", primary.ExtendedEXIF.DateTimeDigitized except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value except JpegFile.InvalidFile: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value
<commit_before><commit_msg>Add example of extracting various date/times.<commit_after>#!/usr/bin/env python from pexif import JpegFile import sys usage = """Usage: dump_timestamp.py filename.jpg""" if len(sys.argv) != 2: print >> sys.stderr, usage sys.exit(1) try: ef = JpegFile.fromFile(sys.argv[1]) primary = ef.get_exif().get_primary() print "Primary DateTime :", primary.DateTime print "Extended DateTimeOriginal :", primary.ExtendedEXIF.DateTimeOriginal print "Extended DateTimeDigitized:", primary.ExtendedEXIF.DateTimeDigitized except IOError: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value except JpegFile.InvalidFile: type, value, traceback = sys.exc_info() print >> sys.stderr, "Error opening file:", value
b0261ec4757167cb3d5bf8ab3ded0273eb9477de
txircd/modules/umode_s.py
txircd/modules/umode_s.py
from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
Implement usermode +s (currently doesn't do anything)
Implement usermode +s (currently doesn't do anything)
Python
bsd-3-clause
ElementalAlchemist/txircd,DesertBus/txircd,Heufneutje/txircd
Implement usermode +s (currently doesn't do anything)
from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
<commit_before><commit_msg>Implement usermode +s (currently doesn't do anything)<commit_after>
from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
Implement usermode +s (currently doesn't do anything)from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
<commit_before><commit_msg>Implement usermode +s (currently doesn't do anything)<commit_after>from txircd.modbase import Mode class ServerNoticeMode(Mode): pass class Spawner(object): def __init__(self, ircd): self.ircd = ircd def spawn(self): return { "modes": { "uns": ServerNoticeMode() } } def cleanup(self): self.ircd.removeMode("uns")
12c53bc50612dac2fc95503c1319ea1944ce3dfd
tests/datastructs/test_datastructs_dict.py
tests/datastructs/test_datastructs_dict.py
import pytest from cafeteria.datastructs.dict import MergingDict, DeepMergingDict @pytest.fixture def simple_dict(): return {"dict": {"one": 1, "nested": {"a": "a"}}, "list": [1]} @pytest.fixture def simple_dict_update(): return {"dict": {"two": 2, "nested": {"b": "b"}}, "list": [2, 3]} class TestMergingDict: def test_simple_merge(self, simple_dict, simple_dict_update): d = MergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"b": "b"}}, "list": [1, 2, 3], } def test_merge_with_key(self, simple_dict): d = MergingDict(simple_dict) d["dict"] = {"two": 2} d["list"] = [2, 3] assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a"}}, "list": [1, 2, 3], } def test_changed_type_value(self, simple_dict): d = MergingDict(simple_dict) d.update({"dict": 0}) assert d == {"dict": 0, "list": [1]} d["dict"] = {"z": "z"} assert d["dict"] == {"z": "z"} class TestDeepMergingDict: def test_simple_deep_merge(self, simple_dict, simple_dict_update): d = DeepMergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a", "b": "b"}}, "list": [1, 2, 3], }
Add simple test cases for merging dict
Add simple test cases for merging dict
Python
apache-2.0
abn/python-cafe
Add simple test cases for merging dict
import pytest from cafeteria.datastructs.dict import MergingDict, DeepMergingDict @pytest.fixture def simple_dict(): return {"dict": {"one": 1, "nested": {"a": "a"}}, "list": [1]} @pytest.fixture def simple_dict_update(): return {"dict": {"two": 2, "nested": {"b": "b"}}, "list": [2, 3]} class TestMergingDict: def test_simple_merge(self, simple_dict, simple_dict_update): d = MergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"b": "b"}}, "list": [1, 2, 3], } def test_merge_with_key(self, simple_dict): d = MergingDict(simple_dict) d["dict"] = {"two": 2} d["list"] = [2, 3] assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a"}}, "list": [1, 2, 3], } def test_changed_type_value(self, simple_dict): d = MergingDict(simple_dict) d.update({"dict": 0}) assert d == {"dict": 0, "list": [1]} d["dict"] = {"z": "z"} assert d["dict"] == {"z": "z"} class TestDeepMergingDict: def test_simple_deep_merge(self, simple_dict, simple_dict_update): d = DeepMergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a", "b": "b"}}, "list": [1, 2, 3], }
<commit_before><commit_msg>Add simple test cases for merging dict<commit_after>
import pytest from cafeteria.datastructs.dict import MergingDict, DeepMergingDict @pytest.fixture def simple_dict(): return {"dict": {"one": 1, "nested": {"a": "a"}}, "list": [1]} @pytest.fixture def simple_dict_update(): return {"dict": {"two": 2, "nested": {"b": "b"}}, "list": [2, 3]} class TestMergingDict: def test_simple_merge(self, simple_dict, simple_dict_update): d = MergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"b": "b"}}, "list": [1, 2, 3], } def test_merge_with_key(self, simple_dict): d = MergingDict(simple_dict) d["dict"] = {"two": 2} d["list"] = [2, 3] assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a"}}, "list": [1, 2, 3], } def test_changed_type_value(self, simple_dict): d = MergingDict(simple_dict) d.update({"dict": 0}) assert d == {"dict": 0, "list": [1]} d["dict"] = {"z": "z"} assert d["dict"] == {"z": "z"} class TestDeepMergingDict: def test_simple_deep_merge(self, simple_dict, simple_dict_update): d = DeepMergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a", "b": "b"}}, "list": [1, 2, 3], }
Add simple test cases for merging dictimport pytest from cafeteria.datastructs.dict import MergingDict, DeepMergingDict @pytest.fixture def simple_dict(): return {"dict": {"one": 1, "nested": {"a": "a"}}, "list": [1]} @pytest.fixture def simple_dict_update(): return {"dict": {"two": 2, "nested": {"b": "b"}}, "list": [2, 3]} class TestMergingDict: def test_simple_merge(self, simple_dict, simple_dict_update): d = MergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"b": "b"}}, "list": [1, 2, 3], } def test_merge_with_key(self, simple_dict): d = MergingDict(simple_dict) d["dict"] = {"two": 2} d["list"] = [2, 3] assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a"}}, "list": [1, 2, 3], } def test_changed_type_value(self, simple_dict): d = MergingDict(simple_dict) d.update({"dict": 0}) assert d == {"dict": 0, "list": [1]} d["dict"] = {"z": "z"} assert d["dict"] == {"z": "z"} class TestDeepMergingDict: def test_simple_deep_merge(self, simple_dict, simple_dict_update): d = DeepMergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a", "b": "b"}}, "list": [1, 2, 3], }
<commit_before><commit_msg>Add simple test cases for merging dict<commit_after>import pytest from cafeteria.datastructs.dict import MergingDict, DeepMergingDict @pytest.fixture def simple_dict(): return {"dict": {"one": 1, "nested": {"a": "a"}}, "list": [1]} @pytest.fixture def simple_dict_update(): return {"dict": {"two": 2, "nested": {"b": "b"}}, "list": [2, 3]} class TestMergingDict: def test_simple_merge(self, simple_dict, simple_dict_update): d = MergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"b": "b"}}, "list": [1, 2, 3], } def test_merge_with_key(self, simple_dict): d = MergingDict(simple_dict) d["dict"] = {"two": 2} d["list"] = [2, 3] assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a"}}, "list": [1, 2, 3], } def test_changed_type_value(self, simple_dict): d = MergingDict(simple_dict) d.update({"dict": 0}) assert d == {"dict": 0, "list": [1]} d["dict"] = {"z": "z"} assert d["dict"] == {"z": "z"} class TestDeepMergingDict: def test_simple_deep_merge(self, simple_dict, simple_dict_update): d = DeepMergingDict(simple_dict) d.update(simple_dict_update) assert d == { "dict": {"one": 1, "two": 2, "nested": {"a": "a", "b": "b"}}, "list": [1, 2, 3], }
d61cfafedd098d1fa06fb662406ef98663b0eba9
txircd/modules/cmode_s.py
txircd/modules/cmode_s.py
from txircd.modbase import Mode

class SecretMode(Mode):
    def listOutput(self, command, data):
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
    # other +s stuff is hiding in other modules.

class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        self.mode_s = SecretMode()
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
Implement channel mode +s (or what's left of it)
Implement channel mode +s (or what's left of it)
Python
bsd-3-clause
Heufneutje/txircd,ElementalAlchemist/txircd,DesertBus/txircd
Implement channel mode +s (or what's left of it)
from txircd.modbase import Mode

class SecretMode(Mode):
    def listOutput(self, command, data):
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
    # other +s stuff is hiding in other modules.

class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        self.mode_s = SecretMode()
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
<commit_before><commit_msg>Implement channel mode +s (or what's left of it)<commit_after>
from txircd.modbase import Mode

class SecretMode(Mode):
    def listOutput(self, command, data):
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
    # other +s stuff is hiding in other modules.

class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        self.mode_s = SecretMode()
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
Implement channel mode +s (or what's left of it)from txircd.modbase import Mode

class SecretMode(Mode):
    def listOutput(self, command, data):
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
    # other +s stuff is hiding in other modules.

class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        self.mode_s = SecretMode()
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
<commit_before><commit_msg>Implement channel mode +s (or what's left of it)<commit_after>from txircd.modbase import Mode

class SecretMode(Mode):
    def listOutput(self, command, data):
        if command != "LIST":
            return data
        cdata = data["cdata"]
        if "s" in cdata["modes"] and cdata["name"] not in data["user"].channels:
            data["cdata"] = {}
    # other +s stuff is hiding in other modules.

class Spawner(object):
    def __init__(self, ircd):
        self.ircd = ircd
        self.mode_s = None

    def spawn(self):
        self.mode_s = SecretMode()
        return {
            "modes": {
                "cns": self.mode_s
            },
            "actions": {
                "commandextra": [self.mode_s.listOutput]
            }
        }

    def cleanup(self):
        self.ircd.removeMode("cns")
        self.ircd.actions["commandextra"].remove(self.mode_s.listOutput)
d41b7a57e6c6f562b5b750a26e46a4b6407c0384
examples/save_users_following_into_file.py
examples/save_users_following_into_file.py
""" instabot example Workflow: Save users' following into a file. """ import argparse import os import sys from tqdm import tqdm sys.path.append(os.path.join(sys.path[0], '../')) from instabot import Bot parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-u', type=str, help="username") parser.add_argument('-p', type=str, help="password") parser.add_argument('-proxy', type=str, help="proxy") parser.add_argument('users', type=str, nargs='+', help='users') args = parser.parse_args() bot = Bot() bot.login(username=args.u, password=args.p, proxy=args.proxy) fh = open("users_following.txt", "a+") for username in args.users: following = bot.get_user_following(username) for user in following: fh.write(user + "\n") fh.close()
Save users' following into file
Save users' following into file
Python
apache-2.0
ohld/instabot,instagrambot/instabot,instagrambot/instabot
Save users' following into file
""" instabot example Workflow: Save users' following into a file. """ import argparse import os import sys from tqdm import tqdm sys.path.append(os.path.join(sys.path[0], '../')) from instabot import Bot parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-u', type=str, help="username") parser.add_argument('-p', type=str, help="password") parser.add_argument('-proxy', type=str, help="proxy") parser.add_argument('users', type=str, nargs='+', help='users') args = parser.parse_args() bot = Bot() bot.login(username=args.u, password=args.p, proxy=args.proxy) fh = open("users_following.txt", "a+") for username in args.users: following = bot.get_user_following(username) for user in following: fh.write(user + "\n") fh.close()
<commit_before><commit_msg>Save users' following into file<commit_after>
""" instabot example Workflow: Save users' following into a file. """ import argparse import os import sys from tqdm import tqdm sys.path.append(os.path.join(sys.path[0], '../')) from instabot import Bot parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-u', type=str, help="username") parser.add_argument('-p', type=str, help="password") parser.add_argument('-proxy', type=str, help="proxy") parser.add_argument('users', type=str, nargs='+', help='users') args = parser.parse_args() bot = Bot() bot.login(username=args.u, password=args.p, proxy=args.proxy) fh = open("users_following.txt", "a+") for username in args.users: following = bot.get_user_following(username) for user in following: fh.write(user + "\n") fh.close()
Save users' following into file""" instabot example Workflow: Save users' following into a file. """ import argparse import os import sys from tqdm import tqdm sys.path.append(os.path.join(sys.path[0], '../')) from instabot import Bot parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-u', type=str, help="username") parser.add_argument('-p', type=str, help="password") parser.add_argument('-proxy', type=str, help="proxy") parser.add_argument('users', type=str, nargs='+', help='users') args = parser.parse_args() bot = Bot() bot.login(username=args.u, password=args.p, proxy=args.proxy) fh = open("users_following.txt", "a+") for username in args.users: following = bot.get_user_following(username) for user in following: fh.write(user + "\n") fh.close()
<commit_before><commit_msg>Save users' following into file<commit_after>""" instabot example Workflow: Save users' following into a file. """ import argparse import os import sys from tqdm import tqdm sys.path.append(os.path.join(sys.path[0], '../')) from instabot import Bot parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-u', type=str, help="username") parser.add_argument('-p', type=str, help="password") parser.add_argument('-proxy', type=str, help="proxy") parser.add_argument('users', type=str, nargs='+', help='users') args = parser.parse_args() bot = Bot() bot.login(username=args.u, password=args.p, proxy=args.proxy) fh = open("users_following.txt", "a+") for username in args.users: following = bot.get_user_following(username) for user in following: fh.write(user + "\n") fh.close()
d35737fb7a758de8fb45e45323aeda1a6b9e7c4a
bearstorage.py
bearstorage.py
import socket

from geoip import geolite2


class BearStorage():
    def __init__(self, ip, rawrequest, timestamp, parsed_request, isDetected):
        self.ip = ip  # String
        self.rawrequest = rawrequest  # String
        self.timestamp = timestamp  # Datetime
        self.path = parsed_request.path  # String
        self.command = parsed_request.command  # String
        self.version = parsed_request.request_version  # String
        self.headers = parsed_request.headers  # String
        self.isDetected = isDetected  # Bool
        location = geolite2.lookup(ip)
        if location is not None:
            self.country = location.country
            self.continent = location.continent
            self.timezone = location.timezone
        else:
            self.country = ''
            self.continent = ''
            self.timezone = ''
        self.tracert = ""  # TODO
        try:
            # Reverse DNS lookup for the connecting address.
            self.dnsname = socket.gethostbyaddr(ip)[0]
        except:
            self.dnsname = ""
        # import subprocess
        # host = 'www.microsoft.com'
        # p = subprocess.Popen(["tracert", '-d', '-w', '100', host],
        #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # while True:
        #     line = p.stdout.readline()
        #     if not line:
        #         break
        #     print '-->', line,
        # p.wait()
Add class for sending data from client to server
Add class for sending data from client to server
Python
mit
Zloool/manyfaced-honeypot
Add class for sending data from client to server
import socket

from geoip import geolite2


class BearStorage():
    def __init__(self, ip, rawrequest, timestamp, parsed_request, isDetected):
        self.ip = ip  # String
        self.rawrequest = rawrequest  # String
        self.timestamp = timestamp  # Datetime
        self.path = parsed_request.path  # String
        self.command = parsed_request.command  # String
        self.version = parsed_request.request_version  # String
        self.headers = parsed_request.headers  # String
        self.isDetected = isDetected  # Bool
        location = geolite2.lookup(ip)
        if location is not None:
            self.country = location.country
            self.continent = location.continent
            self.timezone = location.timezone
        else:
            self.country = ''
            self.continent = ''
            self.timezone = ''
        self.tracert = ""  # TODO
        try:
            # Reverse DNS lookup for the connecting address.
            self.dnsname = socket.gethostbyaddr(ip)[0]
        except:
            self.dnsname = ""
        # import subprocess
        # host = 'www.microsoft.com'
        # p = subprocess.Popen(["tracert", '-d', '-w', '100', host],
        #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # while True:
        #     line = p.stdout.readline()
        #     if not line:
        #         break
        #     print '-->', line,
        # p.wait()
<commit_before><commit_msg>Add class for sending data from client to server<commit_after>
import socket

from geoip import geolite2


class BearStorage():
    def __init__(self, ip, rawrequest, timestamp, parsed_request, isDetected):
        self.ip = ip  # String
        self.rawrequest = rawrequest  # String
        self.timestamp = timestamp  # Datetime
        self.path = parsed_request.path  # String
        self.command = parsed_request.command  # String
        self.version = parsed_request.request_version  # String
        self.headers = parsed_request.headers  # String
        self.isDetected = isDetected  # Bool
        location = geolite2.lookup(ip)
        if location is not None:
            self.country = location.country
            self.continent = location.continent
            self.timezone = location.timezone
        else:
            self.country = ''
            self.continent = ''
            self.timezone = ''
        self.tracert = ""  # TODO
        try:
            # Reverse DNS lookup for the connecting address.
            self.dnsname = socket.gethostbyaddr(ip)[0]
        except:
            self.dnsname = ""
        # import subprocess
        # host = 'www.microsoft.com'
        # p = subprocess.Popen(["tracert", '-d', '-w', '100', host],
        #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # while True:
        #     line = p.stdout.readline()
        #     if not line:
        #         break
        #     print '-->', line,
        # p.wait()
Add class for sending data from client to serverimport socket

from geoip import geolite2


class BearStorage():
    def __init__(self, ip, rawrequest, timestamp, parsed_request, isDetected):
        self.ip = ip  # String
        self.rawrequest = rawrequest  # String
        self.timestamp = timestamp  # Datetime
        self.path = parsed_request.path  # String
        self.command = parsed_request.command  # String
        self.version = parsed_request.request_version  # String
        self.headers = parsed_request.headers  # String
        self.isDetected = isDetected  # Bool
        location = geolite2.lookup(ip)
        if location is not None:
            self.country = location.country
            self.continent = location.continent
            self.timezone = location.timezone
        else:
            self.country = ''
            self.continent = ''
            self.timezone = ''
        self.tracert = ""  # TODO
        try:
            # Reverse DNS lookup for the connecting address.
            self.dnsname = socket.gethostbyaddr(ip)[0]
        except:
            self.dnsname = ""
        # import subprocess
        # host = 'www.microsoft.com'
        # p = subprocess.Popen(["tracert", '-d', '-w', '100', host],
        #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # while True:
        #     line = p.stdout.readline()
        #     if not line:
        #         break
        #     print '-->', line,
        # p.wait()
<commit_before><commit_msg>Add class for sending data from client to server<commit_after>import socket

from geoip import geolite2


class BearStorage():
    def __init__(self, ip, rawrequest, timestamp, parsed_request, isDetected):
        self.ip = ip  # String
        self.rawrequest = rawrequest  # String
        self.timestamp = timestamp  # Datetime
        self.path = parsed_request.path  # String
        self.command = parsed_request.command  # String
        self.version = parsed_request.request_version  # String
        self.headers = parsed_request.headers  # String
        self.isDetected = isDetected  # Bool
        location = geolite2.lookup(ip)
        if location is not None:
            self.country = location.country
            self.continent = location.continent
            self.timezone = location.timezone
        else:
            self.country = ''
            self.continent = ''
            self.timezone = ''
        self.tracert = ""  # TODO
        try:
            # Reverse DNS lookup for the connecting address.
            self.dnsname = socket.gethostbyaddr(ip)[0]
        except:
            self.dnsname = ""
        # import subprocess
        # host = 'www.microsoft.com'
        # p = subprocess.Popen(["tracert", '-d', '-w', '100', host],
        #                      stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        # while True:
        #     line = p.stdout.readline()
        #     if not line:
        #         break
        #     print '-->', line,
        # p.wait()
867c53c6457a24bc89f87cf2362d02d8542cf66e
books/views.py
books/views.py
from django.shortcuts import render from django.views.generic import TemplateView from rest_framework import viewsets, filters from books.models import Book, Category, SubCategory from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer class HomeTemplateView(TemplateView, ): template_name = 'home.html' class BookViewSet(viewsets.ModelViewSet): queryset = Book.objects.all() serializer_class = BookSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', 'subcategory__category__name', 'subcategory__name') class CategoryViewSet(viewsets.ModelViewSet): queryset = Category.objects.all() serializer_class = CategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('name', ) class SubCategoryViewSet(viewsets.ModelViewSet): queryset = SubCategory.objects.all() serializer_class = SubCategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('category__name', )
from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'category__name')

    def get_queryset(self):
        queryset = SubCategory.objects.all()
        category = self.request.query_params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
Add category id filtering to subcategories
Add category id filtering to subcategories
Python
unlicense
spapas/react-tutorial,tbeg/react-tutorial,d0ntg0m0ng/react-tutorial-1,d0ntg0m0ng/react-tutorial-1,tbeg/react-tutorial,spapas/react-tutorial,d0ntg0m0ng/react-tutorial-1,spapas/react-tutorial,tbeg/react-tutorial,spapas/react-tutorial
from django.shortcuts import render from django.views.generic import TemplateView from rest_framework import viewsets, filters from books.models import Book, Category, SubCategory from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer class HomeTemplateView(TemplateView, ): template_name = 'home.html' class BookViewSet(viewsets.ModelViewSet): queryset = Book.objects.all() serializer_class = BookSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', 'subcategory__category__name', 'subcategory__name') class CategoryViewSet(viewsets.ModelViewSet): queryset = Category.objects.all() serializer_class = CategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('name', ) class SubCategoryViewSet(viewsets.ModelViewSet): queryset = SubCategory.objects.all() serializer_class = SubCategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('category__name', )Add category id filtering to subcategories
from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'category__name')

    def get_queryset(self):
        queryset = SubCategory.objects.all()
        category = self.request.query_params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
<commit_before>from django.shortcuts import render from django.views.generic import TemplateView from rest_framework import viewsets, filters from books.models import Book, Category, SubCategory from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer class HomeTemplateView(TemplateView, ): template_name = 'home.html' class BookViewSet(viewsets.ModelViewSet): queryset = Book.objects.all() serializer_class = BookSerializer filter_backends = (filters.SearchFilter,) search_fields = ('title', 'subcategory__category__name', 'subcategory__name') class CategoryViewSet(viewsets.ModelViewSet): queryset = Category.objects.all() serializer_class = CategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('name', ) class SubCategoryViewSet(viewsets.ModelViewSet): queryset = SubCategory.objects.all() serializer_class = SubCategorySerializer filter_backends = (filters.SearchFilter,) search_fields = ('category__name', )<commit_msg>Add category id filtering to subcategories<commit_after>
from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'category__name')

    def get_queryset(self):
        queryset = SubCategory.objects.all()
        category = self.request.query_params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('category__name', )Add category id filtering to subcategoriesfrom django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'category__name')

    def get_queryset(self):
        queryset = SubCategory.objects.all()
        category = self.request.query_params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
<commit_before>from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('category__name', )<commit_msg>Add category id filtering to subcategories<commit_after>from django.shortcuts import render
from django.views.generic import TemplateView
from rest_framework import viewsets, filters

from books.models import Book, Category, SubCategory
from books.serializers import BookSerializer, CategorySerializer, SubCategorySerializer


class HomeTemplateView(TemplateView, ):
    template_name = 'home.html'


class BookViewSet(viewsets.ModelViewSet):
    queryset = Book.objects.all()
    serializer_class = BookSerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('title', 'subcategory__category__name', 'subcategory__name')


class CategoryViewSet(viewsets.ModelViewSet):
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', )


class SubCategoryViewSet(viewsets.ModelViewSet):
    queryset = SubCategory.objects.all()
    serializer_class = SubCategorySerializer
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name', 'category__name')

    def get_queryset(self):
        queryset = SubCategory.objects.all()
        category = self.request.query_params.get('category', None)
        if category is not None:
            queryset = queryset.filter(category_id=category)
        return queryset
ed0ecfdcc614ee29d5aab8565b761b38193e01ae
test/pymadtest/test_interface.py
test/pymadtest/test_interface.py
# tested classes from cern.pymad.abc.interface import Interface # test utilities import unittest from inspect import getdoc from abc import abstractmethod # class AbstractBase(Interface): @abstractmethod def foo(self): """ Dummy documentation for AbstractBase.foo """ pass @abstractmethod def bar(self): """ Dummy documentation for AbstractBase.bar """ pass def _bar(self): """ Dummy documentation for Derived.bar """ pass class Derived(AbstractBase): def foo(self): pass bar = _bar class TestInterface(unittest.TestCase): def test_method_doc(self): self.assertEqual( getdoc(Derived.foo), getdoc(AbstractBase.foo)) self.assertTrue( getdoc(Derived.bar).startswith(getdoc(_bar))) self.assertTrue( getdoc(Derived.bar).endswith(getdoc(AbstractBase.bar)))
Add test for Interface base class
Add test for Interface base class
Python
apache-2.0
pymad/jpymad,pymad/cpymad,pymad/jpymad,pymad/cpymad,pymad/jpymad
Add test for Interface base class
# tested classes from cern.pymad.abc.interface import Interface # test utilities import unittest from inspect import getdoc from abc import abstractmethod # class AbstractBase(Interface): @abstractmethod def foo(self): """ Dummy documentation for AbstractBase.foo """ pass @abstractmethod def bar(self): """ Dummy documentation for AbstractBase.bar """ pass def _bar(self): """ Dummy documentation for Derived.bar """ pass class Derived(AbstractBase): def foo(self): pass bar = _bar class TestInterface(unittest.TestCase): def test_method_doc(self): self.assertEqual( getdoc(Derived.foo), getdoc(AbstractBase.foo)) self.assertTrue( getdoc(Derived.bar).startswith(getdoc(_bar))) self.assertTrue( getdoc(Derived.bar).endswith(getdoc(AbstractBase.bar)))
<commit_before><commit_msg>Add test for Interface base class<commit_after>
# tested classes from cern.pymad.abc.interface import Interface # test utilities import unittest from inspect import getdoc from abc import abstractmethod # class AbstractBase(Interface): @abstractmethod def foo(self): """ Dummy documentation for AbstractBase.foo """ pass @abstractmethod def bar(self): """ Dummy documentation for AbstractBase.bar """ pass def _bar(self): """ Dummy documentation for Derived.bar """ pass class Derived(AbstractBase): def foo(self): pass bar = _bar class TestInterface(unittest.TestCase): def test_method_doc(self): self.assertEqual( getdoc(Derived.foo), getdoc(AbstractBase.foo)) self.assertTrue( getdoc(Derived.bar).startswith(getdoc(_bar))) self.assertTrue( getdoc(Derived.bar).endswith(getdoc(AbstractBase.bar)))
Add test for Interface base class # tested classes from cern.pymad.abc.interface import Interface # test utilities import unittest from inspect import getdoc from abc import abstractmethod # class AbstractBase(Interface): @abstractmethod def foo(self): """ Dummy documentation for AbstractBase.foo """ pass @abstractmethod def bar(self): """ Dummy documentation for AbstractBase.bar """ pass def _bar(self): """ Dummy documentation for Derived.bar """ pass class Derived(AbstractBase): def foo(self): pass bar = _bar class TestInterface(unittest.TestCase): def test_method_doc(self): self.assertEqual( getdoc(Derived.foo), getdoc(AbstractBase.foo)) self.assertTrue( getdoc(Derived.bar).startswith(getdoc(_bar))) self.assertTrue( getdoc(Derived.bar).endswith(getdoc(AbstractBase.bar)))
<commit_before><commit_msg>Add test for Interface base class<commit_after> # tested classes from cern.pymad.abc.interface import Interface # test utilities import unittest from inspect import getdoc from abc import abstractmethod # class AbstractBase(Interface): @abstractmethod def foo(self): """ Dummy documentation for AbstractBase.foo """ pass @abstractmethod def bar(self): """ Dummy documentation for AbstractBase.bar """ pass def _bar(self): """ Dummy documentation for Derived.bar """ pass class Derived(AbstractBase): def foo(self): pass bar = _bar class TestInterface(unittest.TestCase): def test_method_doc(self): self.assertEqual( getdoc(Derived.foo), getdoc(AbstractBase.foo)) self.assertTrue( getdoc(Derived.bar).startswith(getdoc(_bar))) self.assertTrue( getdoc(Derived.bar).endswith(getdoc(AbstractBase.bar)))
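The assertions above pin down a docstring-propagation behavior: a method without a docstring inherits the abstract one verbatim, while a method that already has one gets the abstract documentation appended. One way such an Interface base could be built is with a metaclass; this is only a sketch inferred from the test, not cern.pymad.abc.interface's actual implementation:

from abc import ABCMeta

class InterfaceMeta(ABCMeta):
    def __new__(mcs, name, bases, namespace):
        cls = super().__new__(mcs, name, bases, namespace)
        for attr, member in namespace.items():
            if attr.startswith('__') or not callable(member):
                continue
            for base in bases:
                inherited_doc = getattr(getattr(base, attr, None), '__doc__', None)
                if not inherited_doc:
                    continue
                if not member.__doc__:
                    # Plain inheritance: copy the abstract docstring.
                    member.__doc__ = inherited_doc
                elif inherited_doc not in member.__doc__:
                    # Merge: append the abstract docstring, as the test expects.
                    # This mutates the function object itself, which is fine for
                    # a sketch but visible through other references to it.
                    member.__doc__ = member.__doc__ + inherited_doc
        return cls

class Interface(metaclass=InterfaceMeta):
    pass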
19df6de71721db1a4d7b43e360731704ba462d9d
tests/services/user/test_find_user.py
tests/services/user/test_find_user.py
""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ import pytest from byceps.services.user import service as user_service from tests.conftest import database_recreated from tests.helpers import create_user @pytest.fixture(scope='module') def app(party_app, db): with party_app.app_context(): with database_recreated(db): yield party_app @pytest.fixture(scope='module') def user(): return create_user( 'CarmenSandiego', email_address='carmen.sandiego@world.example' ) def test_find_user_by_email_address_non_lowercase(app, user): actual = user_service.find_user_by_email_address( 'Carmen.Sandiego@World.example' ) assert actual is not None assert actual.email_address == 'carmen.sandiego@world.example' def test_find_user_by_email_address_unknown(app, user): actual = user_service.find_user_by_email_address('no.idea@example.com') assert actual is None def test_find_user_by_screen_name_case_sensitive_match(app, user): actual = user_service.find_user_by_screen_name('CarmenSandiego') assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_sensitive_miss(app, user): actual = user_service.find_user_by_screen_name('cARMENsANDIEGO') assert actual is None def test_find_user_by_screen_name_case_insensitive_match(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGO', case_insensitive=True ) assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_insensitive_miss(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGOx', case_insensitive=True ) assert actual is None def test_find_user_by_screen_name_unknown(app, user): actual = user_service.find_user_by_screen_name('Dunno') assert actual is None
Test finding user by email address, screen name
Test finding user by email address, screen name
Python
bsd-3-clause
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
Test finding user by email address, screen name
""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ import pytest from byceps.services.user import service as user_service from tests.conftest import database_recreated from tests.helpers import create_user @pytest.fixture(scope='module') def app(party_app, db): with party_app.app_context(): with database_recreated(db): yield party_app @pytest.fixture(scope='module') def user(): return create_user( 'CarmenSandiego', email_address='carmen.sandiego@world.example' ) def test_find_user_by_email_address_non_lowercase(app, user): actual = user_service.find_user_by_email_address( 'Carmen.Sandiego@World.example' ) assert actual is not None assert actual.email_address == 'carmen.sandiego@world.example' def test_find_user_by_email_address_unknown(app, user): actual = user_service.find_user_by_email_address('no.idea@example.com') assert actual is None def test_find_user_by_screen_name_case_sensitive_match(app, user): actual = user_service.find_user_by_screen_name('CarmenSandiego') assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_sensitive_miss(app, user): actual = user_service.find_user_by_screen_name('cARMENsANDIEGO') assert actual is None def test_find_user_by_screen_name_case_insensitive_match(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGO', case_insensitive=True ) assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_insensitive_miss(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGOx', case_insensitive=True ) assert actual is None def test_find_user_by_screen_name_unknown(app, user): actual = user_service.find_user_by_screen_name('Dunno') assert actual is None
<commit_before><commit_msg>Test finding user by email address, screen name<commit_after>
""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ import pytest from byceps.services.user import service as user_service from tests.conftest import database_recreated from tests.helpers import create_user @pytest.fixture(scope='module') def app(party_app, db): with party_app.app_context(): with database_recreated(db): yield party_app @pytest.fixture(scope='module') def user(): return create_user( 'CarmenSandiego', email_address='carmen.sandiego@world.example' ) def test_find_user_by_email_address_non_lowercase(app, user): actual = user_service.find_user_by_email_address( 'Carmen.Sandiego@World.example' ) assert actual is not None assert actual.email_address == 'carmen.sandiego@world.example' def test_find_user_by_email_address_unknown(app, user): actual = user_service.find_user_by_email_address('no.idea@example.com') assert actual is None def test_find_user_by_screen_name_case_sensitive_match(app, user): actual = user_service.find_user_by_screen_name('CarmenSandiego') assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_sensitive_miss(app, user): actual = user_service.find_user_by_screen_name('cARMENsANDIEGO') assert actual is None def test_find_user_by_screen_name_case_insensitive_match(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGO', case_insensitive=True ) assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_insensitive_miss(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGOx', case_insensitive=True ) assert actual is None def test_find_user_by_screen_name_unknown(app, user): actual = user_service.find_user_by_screen_name('Dunno') assert actual is None
Test finding user by email address, screen name""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ import pytest from byceps.services.user import service as user_service from tests.conftest import database_recreated from tests.helpers import create_user @pytest.fixture(scope='module') def app(party_app, db): with party_app.app_context(): with database_recreated(db): yield party_app @pytest.fixture(scope='module') def user(): return create_user( 'CarmenSandiego', email_address='carmen.sandiego@world.example' ) def test_find_user_by_email_address_non_lowercase(app, user): actual = user_service.find_user_by_email_address( 'Carmen.Sandiego@World.example' ) assert actual is not None assert actual.email_address == 'carmen.sandiego@world.example' def test_find_user_by_email_address_unknown(app, user): actual = user_service.find_user_by_email_address('no.idea@example.com') assert actual is None def test_find_user_by_screen_name_case_sensitive_match(app, user): actual = user_service.find_user_by_screen_name('CarmenSandiego') assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_sensitive_miss(app, user): actual = user_service.find_user_by_screen_name('cARMENsANDIEGO') assert actual is None def test_find_user_by_screen_name_case_insensitive_match(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGO', case_insensitive=True ) assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_insensitive_miss(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGOx', case_insensitive=True ) assert actual is None def test_find_user_by_screen_name_unknown(app, user): actual = user_service.find_user_by_screen_name('Dunno') assert actual is None
<commit_before><commit_msg>Test finding user by email address, screen name<commit_after>""" :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ import pytest from byceps.services.user import service as user_service from tests.conftest import database_recreated from tests.helpers import create_user @pytest.fixture(scope='module') def app(party_app, db): with party_app.app_context(): with database_recreated(db): yield party_app @pytest.fixture(scope='module') def user(): return create_user( 'CarmenSandiego', email_address='carmen.sandiego@world.example' ) def test_find_user_by_email_address_non_lowercase(app, user): actual = user_service.find_user_by_email_address( 'Carmen.Sandiego@World.example' ) assert actual is not None assert actual.email_address == 'carmen.sandiego@world.example' def test_find_user_by_email_address_unknown(app, user): actual = user_service.find_user_by_email_address('no.idea@example.com') assert actual is None def test_find_user_by_screen_name_case_sensitive_match(app, user): actual = user_service.find_user_by_screen_name('CarmenSandiego') assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_sensitive_miss(app, user): actual = user_service.find_user_by_screen_name('cARMENsANDIEGO') assert actual is None def test_find_user_by_screen_name_case_insensitive_match(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGO', case_insensitive=True ) assert actual is not None assert actual.screen_name == 'CarmenSandiego' def test_find_user_by_screen_name_case_insensitive_miss(app, user): actual = user_service.find_user_by_screen_name( 'cARMENsANDIEGOx', case_insensitive=True ) assert actual is None def test_find_user_by_screen_name_unknown(app, user): actual = user_service.find_user_by_screen_name('Dunno') assert actual is None
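These tests fix the lookup semantics rather than the query itself. A self-contained sketch of the same semantics over plain dictionaries; the real service queries a database, which this deliberately leaves out:

def find_user_by_screen_name(users, screen_name, case_insensitive=False):
    """Return the first user whose screen name matches, or None."""
    if case_insensitive:
        wanted = screen_name.lower()
        matches = (u for u in users if u['screen_name'].lower() == wanted)
    else:
        matches = (u for u in users if u['screen_name'] == screen_name)
    return next(matches, None)

users = [{'screen_name': 'CarmenSandiego'}]
assert find_user_by_screen_name(users, 'cARMENsANDIEGO') is None
assert find_user_by_screen_name(users, 'cARMENsANDIEGO', case_insensitive=True) is not None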
8858d1183f1c62449c77528e9dbbe3e4d2031f92
tests/unit/modules/virtualenv_test.py
tests/unit/modules/virtualenv_test.py
# -*- coding: utf-8 -*-
'''
    tests.unit.modules.virtualenv_test
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
    :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
    :license: Apache 2.0, see LICENSE for more details.
'''

import sys

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import 3rd party libs
try:
    import virtualenv
    ovirtualenv = virtualenv
except ImportError:
    import new
    ovirtualenv = virtualenv = new.module('virtualenv')
    sys.modules['virtualenv'] = virtualenv
    sys.modules['virtualenv'].__version__ = '1.9.1'

try:
    from mock import MagicMock, patch
    has_mock = True
except ImportError:
    has_mock = False
    patch = lambda x: lambda y: None

# Import salt libs
from salt.modules import virtualenv_mod

virtualenv_mod.__salt__ = {'cmd.which_bin': lambda _: 'virtualenv'}


@skipIf(has_mock is False, 'mock python module is unavailable')
class VirtualenvTestCase(TestCase):

    def test_issue_6029_deprecated_distribute(self):
        VIRTUALENV_VERSION_INFO = virtualenv_mod.VIRTUALENV_VERSION_INFO
        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 9, 1)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages --distribute /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 10)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = VIRTUALENV_VERSION_INFO


if __name__ == '__main__':
    from integration import run_tests
    run_tests(VirtualenvTestCase, needs_daemon=False)
Add test case which tests for proper deprecated output based on virtualenv version.
Add test case which tests for proper deprecated output based on virtualenv version.
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
Add test case which tests for proper deprecated output based on virtualenv version.
# -*- coding: utf-8 -*-
'''
    tests.unit.modules.virtualenv_test
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
    :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
    :license: Apache 2.0, see LICENSE for more details.
'''

import sys

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import 3rd party libs
try:
    import virtualenv
    ovirtualenv = virtualenv
except ImportError:
    import new
    ovirtualenv = virtualenv = new.module('virtualenv')
    sys.modules['virtualenv'] = virtualenv
    sys.modules['virtualenv'].__version__ = '1.9.1'

try:
    from mock import MagicMock, patch
    has_mock = True
except ImportError:
    has_mock = False
    patch = lambda x: lambda y: None

# Import salt libs
from salt.modules import virtualenv_mod

virtualenv_mod.__salt__ = {'cmd.which_bin': lambda _: 'virtualenv'}


@skipIf(has_mock is False, 'mock python module is unavailable')
class VirtualenvTestCase(TestCase):

    def test_issue_6029_deprecated_distribute(self):
        VIRTUALENV_VERSION_INFO = virtualenv_mod.VIRTUALENV_VERSION_INFO
        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 9, 1)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages --distribute /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 10)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = VIRTUALENV_VERSION_INFO


if __name__ == '__main__':
    from integration import run_tests
    run_tests(VirtualenvTestCase, needs_daemon=False)
<commit_before><commit_msg>Add test case which tests for proper deprecated output based on virtualenv version.<commit_after>
# -*- coding: utf-8 -*-
'''
    tests.unit.modules.virtualenv_test
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
    :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
    :license: Apache 2.0, see LICENSE for more details.
'''

import sys

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import 3rd party libs
try:
    import virtualenv
    ovirtualenv = virtualenv
except ImportError:
    import new
    ovirtualenv = virtualenv = new.module('virtualenv')
    sys.modules['virtualenv'] = virtualenv
    sys.modules['virtualenv'].__version__ = '1.9.1'

try:
    from mock import MagicMock, patch
    has_mock = True
except ImportError:
    has_mock = False
    patch = lambda x: lambda y: None

# Import salt libs
from salt.modules import virtualenv_mod

virtualenv_mod.__salt__ = {'cmd.which_bin': lambda _: 'virtualenv'}


@skipIf(has_mock is False, 'mock python module is unavailable')
class VirtualenvTestCase(TestCase):

    def test_issue_6029_deprecated_distribute(self):
        VIRTUALENV_VERSION_INFO = virtualenv_mod.VIRTUALENV_VERSION_INFO
        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 9, 1)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages --distribute /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 10)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = VIRTUALENV_VERSION_INFO


if __name__ == '__main__':
    from integration import run_tests
    run_tests(VirtualenvTestCase, needs_daemon=False)
Add test case which tests for proper deprecated output based on virtualenv version.# -*- coding: utf-8 -*-
'''
    tests.unit.modules.virtualenv_test
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
    :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
    :license: Apache 2.0, see LICENSE for more details.
'''

import sys

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import 3rd party libs
try:
    import virtualenv
    ovirtualenv = virtualenv
except ImportError:
    import new
    ovirtualenv = virtualenv = new.module('virtualenv')
    sys.modules['virtualenv'] = virtualenv
    sys.modules['virtualenv'].__version__ = '1.9.1'

try:
    from mock import MagicMock, patch
    has_mock = True
except ImportError:
    has_mock = False
    patch = lambda x: lambda y: None

# Import salt libs
from salt.modules import virtualenv_mod

virtualenv_mod.__salt__ = {'cmd.which_bin': lambda _: 'virtualenv'}


@skipIf(has_mock is False, 'mock python module is unavailable')
class VirtualenvTestCase(TestCase):

    def test_issue_6029_deprecated_distribute(self):
        VIRTUALENV_VERSION_INFO = virtualenv_mod.VIRTUALENV_VERSION_INFO
        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 9, 1)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages --distribute /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 10)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = VIRTUALENV_VERSION_INFO


if __name__ == '__main__':
    from integration import run_tests
    run_tests(VirtualenvTestCase, needs_daemon=False)
<commit_before><commit_msg>Add test case which tests for proper deprecated output based on virtualenv version.<commit_after># -*- coding: utf-8 -*-
'''
    tests.unit.modules.virtualenv_test
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

    :codeauthor: :email:`Pedro Algarvio (pedro@algarvio.me)`
    :copyright: © 2013 by the SaltStack Team, see AUTHORS for more details.
    :license: Apache 2.0, see LICENSE for more details.
'''

import sys

# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')

# Import 3rd party libs
try:
    import virtualenv
    ovirtualenv = virtualenv
except ImportError:
    import new
    ovirtualenv = virtualenv = new.module('virtualenv')
    sys.modules['virtualenv'] = virtualenv
    sys.modules['virtualenv'].__version__ = '1.9.1'

try:
    from mock import MagicMock, patch
    has_mock = True
except ImportError:
    has_mock = False
    patch = lambda x: lambda y: None

# Import salt libs
from salt.modules import virtualenv_mod

virtualenv_mod.__salt__ = {'cmd.which_bin': lambda _: 'virtualenv'}


@skipIf(has_mock is False, 'mock python module is unavailable')
class VirtualenvTestCase(TestCase):

    def test_issue_6029_deprecated_distribute(self):
        VIRTUALENV_VERSION_INFO = virtualenv_mod.VIRTUALENV_VERSION_INFO
        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 9, 1)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages --distribute /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = (1, 10)

        mock = MagicMock(return_value={'retcode': 0, 'stdout': ''})
        with patch.dict(virtualenv_mod.__salt__, {'cmd.run_all': mock}):
            virtualenv_mod.create(
                '/tmp/foo', no_site_packages=True, distribute=True
            )

        mock.assert_called_once_with(
            'virtualenv --no-site-packages /tmp/foo', runas=None
        )

        virtualenv_mod.VIRTUALENV_VERSION_INFO = VIRTUALENV_VERSION_INFO


if __name__ == '__main__':
    from integration import run_tests
    run_tests(VirtualenvTestCase, needs_daemon=False)
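The two assertions encode the rule under test: virtualenv deprecated --distribute in 1.10, so the flag may only be emitted for older versions. A stand-alone sketch of that version gate; salt's real virtualenv_mod.create is not reproduced here:

def build_create_cmd(path, no_site_packages=False, distribute=False,
                     version_info=(1, 9, 1)):
    cmd = ['virtualenv']
    if no_site_packages:
        cmd.append('--no-site-packages')
    if distribute and version_info < (1, 10):  # dropped from 1.10 onwards
        cmd.append('--distribute')
    cmd.append(path)
    return ' '.join(cmd)

assert build_create_cmd('/tmp/foo', True, True, (1, 9, 1)) == \
    'virtualenv --no-site-packages --distribute /tmp/foo'
assert build_create_cmd('/tmp/foo', True, True, (1, 10)) == \
    'virtualenv --no-site-packages /tmp/foo'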
7b9f255799252f56c149583b41535b7a85b326e6
tests/test_vector2_dot.py
tests/test_vector2_dot.py
from ppb_vector import Vector2 from math import isclose, sqrt import pytest # type: ignore from hypothesis import assume, given, note from utils import floats, vectors @given(x=vectors(), y=vectors()) def test_dot_commutes(x: Vector2, y: Vector2): assert x * y == y * x MAGNITUDE=1e10 @given(x=vectors(max_magnitude=MAGNITUDE), z=vectors(max_magnitude=MAGNITUDE), y=vectors(max_magnitude=sqrt(MAGNITUDE)), scalar=floats(max_magnitude=sqrt(MAGNITUDE))) def test_dot_linear(x: Vector2, y: Vector2, z: Vector2, scalar: float): """Test that x · (λ y + z) = λ x·y + x·z""" inner, outer = x * (scalar * y + z), scalar * x * y + x * z note(f"inner: {inner}") note(f"outer: {outer}") assert isclose(inner, outer, abs_tol=1e-5, rel_tol=1e-5)
Add initial tests for dot product
Add initial tests for dot product Closes #9
Python
artistic-2.0
ppb/ppb-vector,ppb/ppb-vector
Add initial tests for dot product Closes #9
from ppb_vector import Vector2 from math import isclose, sqrt import pytest # type: ignore from hypothesis import assume, given, note from utils import floats, vectors @given(x=vectors(), y=vectors()) def test_dot_commutes(x: Vector2, y: Vector2): assert x * y == y * x MAGNITUDE=1e10 @given(x=vectors(max_magnitude=MAGNITUDE), z=vectors(max_magnitude=MAGNITUDE), y=vectors(max_magnitude=sqrt(MAGNITUDE)), scalar=floats(max_magnitude=sqrt(MAGNITUDE))) def test_dot_linear(x: Vector2, y: Vector2, z: Vector2, scalar: float): """Test that x · (λ y + z) = λ x·y + x·z""" inner, outer = x * (scalar * y + z), scalar * x * y + x * z note(f"inner: {inner}") note(f"outer: {outer}") assert isclose(inner, outer, abs_tol=1e-5, rel_tol=1e-5)
<commit_before><commit_msg>Add initial tests for dot product Closes #9<commit_after>
from ppb_vector import Vector2 from math import isclose, sqrt import pytest # type: ignore from hypothesis import assume, given, note from utils import floats, vectors @given(x=vectors(), y=vectors()) def test_dot_commutes(x: Vector2, y: Vector2): assert x * y == y * x MAGNITUDE=1e10 @given(x=vectors(max_magnitude=MAGNITUDE), z=vectors(max_magnitude=MAGNITUDE), y=vectors(max_magnitude=sqrt(MAGNITUDE)), scalar=floats(max_magnitude=sqrt(MAGNITUDE))) def test_dot_linear(x: Vector2, y: Vector2, z: Vector2, scalar: float): """Test that x · (λ y + z) = λ x·y + x·z""" inner, outer = x * (scalar * y + z), scalar * x * y + x * z note(f"inner: {inner}") note(f"outer: {outer}") assert isclose(inner, outer, abs_tol=1e-5, rel_tol=1e-5)
Add initial tests for dot product Closes #9from ppb_vector import Vector2 from math import isclose, sqrt import pytest # type: ignore from hypothesis import assume, given, note from utils import floats, vectors @given(x=vectors(), y=vectors()) def test_dot_commutes(x: Vector2, y: Vector2): assert x * y == y * x MAGNITUDE=1e10 @given(x=vectors(max_magnitude=MAGNITUDE), z=vectors(max_magnitude=MAGNITUDE), y=vectors(max_magnitude=sqrt(MAGNITUDE)), scalar=floats(max_magnitude=sqrt(MAGNITUDE))) def test_dot_linear(x: Vector2, y: Vector2, z: Vector2, scalar: float): """Test that x · (λ y + z) = λ x·y + x·z""" inner, outer = x * (scalar * y + z), scalar * x * y + x * z note(f"inner: {inner}") note(f"outer: {outer}") assert isclose(inner, outer, abs_tol=1e-5, rel_tol=1e-5)
<commit_before><commit_msg>Add initial tests for dot product Closes #9<commit_after>from ppb_vector import Vector2 from math import isclose, sqrt import pytest # type: ignore from hypothesis import assume, given, note from utils import floats, vectors @given(x=vectors(), y=vectors()) def test_dot_commutes(x: Vector2, y: Vector2): assert x * y == y * x MAGNITUDE=1e10 @given(x=vectors(max_magnitude=MAGNITUDE), z=vectors(max_magnitude=MAGNITUDE), y=vectors(max_magnitude=sqrt(MAGNITUDE)), scalar=floats(max_magnitude=sqrt(MAGNITUDE))) def test_dot_linear(x: Vector2, y: Vector2, z: Vector2, scalar: float): """Test that x · (λ y + z) = λ x·y + x·z""" inner, outer = x * (scalar * y + z), scalar * x * y + x * z note(f"inner: {inner}") note(f"outer: {outer}") assert isclose(inner, outer, abs_tol=1e-5, rel_tol=1e-5)
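The floats and vectors helpers come from a utils module that is not part of this record. A plausible sketch of such hypothesis strategies, with the names, default bounds, and construction being assumptions:

from hypothesis import strategies as st

def floats(max_magnitude=1e75):
    # Bounded, finite floats so tolerance-based comparisons stay meaningful.
    return st.floats(min_value=-max_magnitude, max_value=max_magnitude,
                     allow_nan=False, allow_infinity=False)

def vectors(max_magnitude=1e75):
    # Build Vector2 instances from two bounded, finite float components.
    from ppb_vector import Vector2
    return st.builds(Vector2, floats(max_magnitude), floats(max_magnitude))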
44cbc513a60c248fcaa45505935cbac1cff231c7
CodeFights/chessKnight.py
CodeFights/chessKnight.py
#!/usr/local/bin/python # Code Fights Chess Knight Problem def chessKnight(cell): # Check it's a valid cell import re match = re.search(r'^([a-h])([1-8])$', cell, re.I) if not bool(match): # Invalid cell return 0 file, rank = match.group(1).lower(), int(match.group(2)) moves = 8 if file in "ah": moves -= 4 if rank == 2 or rank == 7: moves -= 1 elif rank == 1 or rank == 8: moves -= 2 elif file in "bg": moves -= 2 if rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 3 elif rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 4 return moves def main(): tests = [ ["a1", 2], ["c2", 6], ["d4", 8], ["g6", 6], ["a10", 0] ] for t in tests: res = chessKnight(t[0]) ans = t[1] if ans == res: print("PASSED: chessKnight({}) returned {}" .format(t[0], res)) else: print("FAILED: chessKnight({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights chess knight problem
Solve Code Fights chess knight problem
Python
mit
HKuz/Test_Code
Solve Code Fights chess knight problem
#!/usr/local/bin/python # Code Fights Chess Knight Problem def chessKnight(cell): # Check it's a valid cell import re match = re.search(r'^([a-h])([1-8])$', cell, re.I) if not bool(match): # Invalid cell return 0 file, rank = match.group(1).lower(), int(match.group(2)) moves = 8 if file in "ah": moves -= 4 if rank == 2 or rank == 7: moves -= 1 elif rank == 1 or rank == 8: moves -= 2 elif file in "bg": moves -= 2 if rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 3 elif rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 4 return moves def main(): tests = [ ["a1", 2], ["c2", 6], ["d4", 8], ["g6", 6], ["a10", 0] ] for t in tests: res = chessKnight(t[0]) ans = t[1] if ans == res: print("PASSED: chessKnight({}) returned {}" .format(t[0], res)) else: print("FAILED: chessKnight({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights chess knight problem<commit_after>
#!/usr/local/bin/python # Code Fights Chess Knight Problem def chessKnight(cell): # Check it's a valid cell import re match = re.search(r'^([a-h])([1-8])$', cell, re.I) if not bool(match): # Invalid cell return 0 file, rank = match.group(1).lower(), int(match.group(2)) moves = 8 if file in "ah": moves -= 4 if rank == 2 or rank == 7: moves -= 1 elif rank == 1 or rank == 8: moves -= 2 elif file in "bg": moves -= 2 if rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 3 elif rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 4 return moves def main(): tests = [ ["a1", 2], ["c2", 6], ["d4", 8], ["g6", 6], ["a10", 0] ] for t in tests: res = chessKnight(t[0]) ans = t[1] if ans == res: print("PASSED: chessKnight({}) returned {}" .format(t[0], res)) else: print("FAILED: chessKnight({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
Solve Code Fights chess knight problem#!/usr/local/bin/python # Code Fights Chess Knight Problem def chessKnight(cell): # Check it's a valid cell import re match = re.search(r'^([a-h])([1-8])$', cell, re.I) if not bool(match): # Invalid cell return 0 file, rank = match.group(1).lower(), int(match.group(2)) moves = 8 if file in "ah": moves -= 4 if rank == 2 or rank == 7: moves -= 1 elif rank == 1 or rank == 8: moves -= 2 elif file in "bg": moves -= 2 if rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 3 elif rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 4 return moves def main(): tests = [ ["a1", 2], ["c2", 6], ["d4", 8], ["g6", 6], ["a10", 0] ] for t in tests: res = chessKnight(t[0]) ans = t[1] if ans == res: print("PASSED: chessKnight({}) returned {}" .format(t[0], res)) else: print("FAILED: chessKnight({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
<commit_before><commit_msg>Solve Code Fights chess knight problem<commit_after>#!/usr/local/bin/python # Code Fights Chess Knight Problem def chessKnight(cell): # Check it's a valid cell import re match = re.search(r'^([a-h])([1-8])$', cell, re.I) if not bool(match): # Invalid cell return 0 file, rank = match.group(1).lower(), int(match.group(2)) moves = 8 if file in "ah": moves -= 4 if rank == 2 or rank == 7: moves -= 1 elif rank == 1 or rank == 8: moves -= 2 elif file in "bg": moves -= 2 if rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 3 elif rank == 2 or rank == 7: moves -= 2 elif rank == 1 or rank == 8: moves -= 4 return moves def main(): tests = [ ["a1", 2], ["c2", 6], ["d4", 8], ["g6", 6], ["a10", 0] ] for t in tests: res = chessKnight(t[0]) ans = t[1] if ans == res: print("PASSED: chessKnight({}) returned {}" .format(t[0], res)) else: print("FAILED: chessKnight({}) returned {}, answer: {}" .format(t[0], res, ans)) if __name__ == '__main__': main()
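The per-edge case analysis above can be cross-checked against a shorter formulation that enumerates the knight's eight (file, rank) offsets and counts the destinations that stay on the board:

def chess_knight_by_offsets(cell):
    import re
    match = re.search(r'^([a-h])([1-8])$', cell, re.I)
    if not match:
        return 0
    f = ord(match.group(1).lower()) - ord('a') + 1  # file as 1..8
    r = int(match.group(2))
    offsets = [(1, 2), (2, 1), (2, -1), (1, -2),
               (-1, -2), (-2, -1), (-2, 1), (-1, 2)]
    return sum(1 <= f + df <= 8 and 1 <= r + dr <= 8 for df, dr in offsets)

assert chess_knight_by_offsets('a1') == 2
assert chess_knight_by_offsets('d4') == 8
assert chess_knight_by_offsets('a10') == 0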
7877ced4a52968c59f698ab0bcb0ff1b350e6045
ObjectTracking/test/displayIPVideoStream.py
ObjectTracking/test/displayIPVideoStream.py
#!/usr/bin/python # coding=utf-8 # vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}' from SimpleCV import JpegStreamCamera, Display, Image cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed') disp = Display() while disp.isNotDone(): img = cam.getImage() img.save(disp)
Add a basic test to display Video stream with simplecv
Add a basic test to display Video stream with simplecv
Python
mit
baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite,baptistelabat/robokite
Add a basic test to display Video stream with simplecv
#!/usr/bin/python # coding=utf-8 # vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}' from SimpleCV import JpegStreamCamera, Display, Image cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed') disp = Display() while disp.isNotDone(): img = cam.getImage() img.save(disp)
<commit_before><commit_msg>Add a basic test to display Video stream with simplecv<commit_after>
#!/usr/bin/python # coding=utf-8 # vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}' from SimpleCV import JpegStreamCamera, Display, Image cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed') disp = Display() while disp.isNotDone(): img = cam.getImage() img.save(disp)
Add a basic test to display Video stream with simplecv#!/usr/bin/python # coding=utf-8 # vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}' from SimpleCV import JpegStreamCamera, Display, Image cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed') disp = Display() while disp.isNotDone(): img = cam.getImage() img.save(disp)
<commit_before><commit_msg>Add a basic test to display Video stream with simplecv<commit_after>#!/usr/bin/python # coding=utf-8 # vlc -vvv "/media/bat/DATA/videos/PERSEPOLIS.avi" --sout '#transcode{vcodec=mjpg,vb=2500,width=640,height=480,acodec=none}:standard{access=http,mux=mpjpeg,dst=localhost:8080/videofeed}' from SimpleCV import JpegStreamCamera, Display, Image cam = JpegStreamCamera('http://192.168.1.3:8080/videofeed') disp = Display() while disp.isNotDone(): img = cam.getImage() img.save(disp)
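SimpleCV's JpegStreamCamera wraps an MJPEG HTTP feed like the one the vlc command in the header produces. SimpleCV is Python-2-era and unmaintained, so for reference, a sketch of the same smoke test with OpenCV; the URL is the same placeholder address, and opencv-python is assumed to be installed:

import cv2

cap = cv2.VideoCapture('http://192.168.1.3:8080/videofeed')
while cap.isOpened():
    ok, frame = cap.read()
    if not ok:
        break  # stream ended or dropped
    cv2.imshow('stream', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to quit
        break
cap.release()
cv2.destroyAllWindows()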
55a505c411d350127807148b888f814abe610691
Cura/Scene/BoxRenderer.py
Cura/Scene/BoxRenderer.py
from Cura.Math.AxisAlignedBox import AxisAlignedBox from Cura.Math.Vector import Vector from Cura.Scene.ToolHandle import ToolHandle class BoxRenderer(ToolHandle): def __init__(self, box, parent = None): super().__init__(parent) md = self.getMeshData() rtf = box.rightTopFront lbb = box.leftBottomBack md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back
Add a SceneNode subclass that can render a bounding box
Add a SceneNode subclass that can render a bounding box
Python
agpl-3.0
onitake/Uranium,onitake/Uranium
Add a SceneNode subclass that can render a bounding box
from Cura.Math.AxisAlignedBox import AxisAlignedBox from Cura.Math.Vector import Vector from Cura.Scene.ToolHandle import ToolHandle class BoxRenderer(ToolHandle): def __init__(self, box, parent = None): super().__init__(parent) md = self.getMeshData() rtf = box.rightTopFront lbb = box.leftBottomBack md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back
<commit_before><commit_msg>Add a SceneNode subclass that can render a bounding box<commit_after>
from Cura.Math.AxisAlignedBox import AxisAlignedBox from Cura.Math.Vector import Vector from Cura.Scene.ToolHandle import ToolHandle class BoxRenderer(ToolHandle): def __init__(self, box, parent = None): super().__init__(parent) md = self.getMeshData() rtf = box.rightTopFront lbb = box.leftBottomBack md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back
Add a SceneNode subclass that can render a bounding boxfrom Cura.Math.AxisAlignedBox import AxisAlignedBox from Cura.Math.Vector import Vector from Cura.Scene.ToolHandle import ToolHandle class BoxRenderer(ToolHandle): def __init__(self, box, parent = None): super().__init__(parent) md = self.getMeshData() rtf = box.rightTopFront lbb = box.leftBottomBack md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back
<commit_before><commit_msg>Add a SceneNode subclass that can render a bounding box<commit_after>from Cura.Math.AxisAlignedBox import AxisAlignedBox from Cura.Math.Vector import Vector from Cura.Scene.ToolHandle import ToolHandle class BoxRenderer(ToolHandle): def __init__(self, box, parent = None): super().__init__(parent) md = self.getMeshData() rtf = box.rightTopFront lbb = box.leftBottomBack md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(rtf.x, rtf.y, rtf.z) #Right - Top - Front md.addVertex(rtf.x, rtf.y, lbb.z) #Right - Top - Back md.addVertex(lbb.x, rtf.y, rtf.z) #Left - Top - Front md.addVertex(lbb.x, rtf.y, lbb.z) #Left - Top - Back md.addVertex(lbb.x, lbb.y, rtf.z) #Left - Bottom - Front md.addVertex(lbb.x, lbb.y, lbb.z) #Left - Bottom - Back md.addVertex(rtf.x, lbb.y, rtf.z) #Right - Bottom - Front md.addVertex(rtf.x, lbb.y, lbb.z) #Right - Bottom - Back
358dc90f2e1925f186d216eeac83d28d5852b099
testing/text_feed_script.py
testing/text_feed_script.py
import sys # file location should be in the first arg filename = sys.argv[1] f = open(filename, "r") lines = f.readlines() f.close lastline = lines.pop() # remove last line # reopen the file in write mode f = open(filename, "w") for line in lines: f.write(line) f.close() print lastline.rstrip()
Add text feed testing script, for when we don't want to be talking to test the rest of the project
Add text feed testing script, for when we don't want to be talking to test the rest of the project
Python
mit
Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project,Nespa32/sm_project
Add text feed testing script, for when we don't want to be talking to test the rest of the project
import sys # file location should be in the first arg filename = sys.argv[1] f = open(filename, "r") lines = f.readlines() f.close lastline = lines.pop() # remove last line # reopen the file in write mode f = open(filename, "w") for line in lines: f.write(line) f.close() print lastline.rstrip()
<commit_before><commit_msg>Add text feed testing script, for when we don't want to be talking to test the rest of the project<commit_after>
import sys # file location should be in the first arg filename = sys.argv[1] f = open(filename, "r") lines = f.readlines() f.close lastline = lines.pop() # remove last line # reopen the file in write mode f = open(filename, "w") for line in lines: f.write(line) f.close() print lastline.rstrip()
Add text feed testing script, for when we don't want to be talking to test the rest of the project import sys # file location should be in the first arg filename = sys.argv[1] f = open(filename, "r") lines = f.readlines() f.close lastline = lines.pop() # remove last line # reopen the file in write mode f = open(filename, "w") for line in lines: f.write(line) f.close() print lastline.rstrip()
<commit_before><commit_msg>Add text feed testing script, for when we don't want to be talking to test the rest of the project<commit_after> import sys # file location should be in the first arg filename = sys.argv[1] f = open(filename, "r") lines = f.readlines() f.close lastline = lines.pop() # remove last line # reopen the file in write mode f = open(filename, "w") for line in lines: f.write(line) f.close() print lastline.rstrip()
2305380ba28805cd42eb63db35440f77ededd1a2
test/streamparse/cli/test_run.py
test/streamparse/cli/test_run.py
from __future__ import absolute_import, unicode_literals import argparse import unittest from nose.tools import ok_ try: from unittest.mock import patch except ImportError: from mock import patch from streamparse.cli.run import main, subparser_hook class RunTestCase(unittest.TestCase): def test_subparser_hook(self): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) subcommands = parser._optionals._actions[1].choices.keys() ok_('run' in subcommands) @patch('streamparse.cli.run.run_local_topology', autospec=True) def test_main_args_passed(self, run_local_mock): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) args = parser.parse_args('run -e my_env -n my_topo --ackers 1'.split()) main(args) run_local_mock.assert_called_with(name='my_topo', options={'topology.acker.executors': 1}, env_name='my_env', time=0) if __name__ == '__main__': unittest.main()
Add simple tests for sparse run
Add simple tests for sparse run
Python
apache-2.0
Parsely/streamparse,codywilbourn/streamparse,codywilbourn/streamparse,Parsely/streamparse
Add simple tests for sparse run
from __future__ import absolute_import, unicode_literals import argparse import unittest from nose.tools import ok_ try: from unittest.mock import patch except ImportError: from mock import patch from streamparse.cli.run import main, subparser_hook class RunTestCase(unittest.TestCase): def test_subparser_hook(self): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) subcommands = parser._optionals._actions[1].choices.keys() ok_('run' in subcommands) @patch('streamparse.cli.run.run_local_topology', autospec=True) def test_main_args_passed(self, run_local_mock): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) args = parser.parse_args('run -e my_env -n my_topo --ackers 1'.split()) main(args) run_local_mock.assert_called_with(name='my_topo', options={'topology.acker.executors': 1}, env_name='my_env', time=0) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add simple tests for sparse run<commit_after>
from __future__ import absolute_import, unicode_literals import argparse import unittest from nose.tools import ok_ try: from unittest.mock import patch except ImportError: from mock import patch from streamparse.cli.run import main, subparser_hook class RunTestCase(unittest.TestCase): def test_subparser_hook(self): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) subcommands = parser._optionals._actions[1].choices.keys() ok_('run' in subcommands) @patch('streamparse.cli.run.run_local_topology', autospec=True) def test_main_args_passed(self, run_local_mock): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) args = parser.parse_args('run -e my_env -n my_topo --ackers 1'.split()) main(args) run_local_mock.assert_called_with(name='my_topo', options={'topology.acker.executors': 1}, env_name='my_env', time=0) if __name__ == '__main__': unittest.main()
Add simple tests for sparse runfrom __future__ import absolute_import, unicode_literals import argparse import unittest from nose.tools import ok_ try: from unittest.mock import patch except ImportError: from mock import patch from streamparse.cli.run import main, subparser_hook class RunTestCase(unittest.TestCase): def test_subparser_hook(self): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) subcommands = parser._optionals._actions[1].choices.keys() ok_('run' in subcommands) @patch('streamparse.cli.run.run_local_topology', autospec=True) def test_main_args_passed(self, run_local_mock): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) args = parser.parse_args('run -e my_env -n my_topo --ackers 1'.split()) main(args) run_local_mock.assert_called_with(name='my_topo', options={'topology.acker.executors': 1}, env_name='my_env', time=0) if __name__ == '__main__': unittest.main()
<commit_before><commit_msg>Add simple tests for sparse run<commit_after>from __future__ import absolute_import, unicode_literals import argparse import unittest from nose.tools import ok_ try: from unittest.mock import patch except ImportError: from mock import patch from streamparse.cli.run import main, subparser_hook class RunTestCase(unittest.TestCase): def test_subparser_hook(self): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) subcommands = parser._optionals._actions[1].choices.keys() ok_('run' in subcommands) @patch('streamparse.cli.run.run_local_topology', autospec=True) def test_main_args_passed(self, run_local_mock): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers() subparser_hook(subparsers) args = parser.parse_args('run -e my_env -n my_topo --ackers 1'.split()) main(args) run_local_mock.assert_called_with(name='my_topo', options={'topology.acker.executors': 1}, env_name='my_env', time=0) if __name__ == '__main__': unittest.main()
9f5de36cbb9cff0f8bb258de3ca5b9c2a27e72a8
bioagents/qca/lispify_helper.py
bioagents/qca/lispify_helper.py
__author__ = 'aarongary' class Lispify(): def __init__(self, obj): if(obj is None): raise ValueError("Please provide an object") self.obj = obj def to_lisp(self): if type(self.obj) is unicode: return self.lispify(str(self.obj)) else: return self.lispify(self.obj) def lispify(self, L, indra_statement=False): "Convert a Python object L to a lisp representation." if (isinstance(L, str) or isinstance(L, float) or isinstance(L, int)): if indra_statement: return '"%s"' % L else: return L elif (isinstance(L, list) or isinstance(L, tuple)): s = [] for element in L: s += [self.lispify(element)] return '(' + ' '.join(s) + ')' elif isinstance(L, dict): s = [] for key in L: #print "key: " + key #print L[key] tmp_key = str(key) if not tmp_key.isalnum(): tmp_key = '"%s"' % tmp_key if key == "INDRA statement": s += [":{0} {1}".format(tmp_key, self.lispify(L[key], True))] else: s += [":{0} {1}".format(tmp_key, self.lispify(L[key], False))] return '(' + ' '.join(s) + ')' elif isinstance(L, unicode): if indra_statement: return '"%s"' % str(L) else: return str(L) else: return L
Add Lispify class to project
Add Lispify class to project
Python
bsd-2-clause
bgyori/bioagents,sorgerlab/bioagents
Add Lispify class to project
__author__ = 'aarongary' class Lispify(): def __init__(self, obj): if(obj is None): raise ValueError("Please provide an object") self.obj = obj def to_lisp(self): if type(self.obj) is unicode: return self.lispify(str(self.obj)) else: return self.lispify(self.obj) def lispify(self, L, indra_statement=False): "Convert a Python object L to a lisp representation." if (isinstance(L, str) or isinstance(L, float) or isinstance(L, int)): if indra_statement: return '"%s"' % L else: return L elif (isinstance(L, list) or isinstance(L, tuple)): s = [] for element in L: s += [self.lispify(element)] return '(' + ' '.join(s) + ')' elif isinstance(L, dict): s = [] for key in L: #print "key: " + key #print L[key] tmp_key = str(key) if not tmp_key.isalnum(): tmp_key = '"%s"' % tmp_key if key == "INDRA statement": s += [":{0} {1}".format(tmp_key, self.lispify(L[key], True))] else: s += [":{0} {1}".format(tmp_key, self.lispify(L[key], False))] return '(' + ' '.join(s) + ')' elif isinstance(L, unicode): if indra_statement: return '"%s"' % str(L) else: return str(L) else: return L
<commit_before><commit_msg>Add Lispify class to project<commit_after>
__author__ = 'aarongary' class Lispify(): def __init__(self, obj): if(obj is None): raise ValueError("Please provide an object") self.obj = obj def to_lisp(self): if type(self.obj) is unicode: return self.lispify(str(self.obj)) else: return self.lispify(self.obj) def lispify(self, L, indra_statement=False): "Convert a Python object L to a lisp representation." if (isinstance(L, str) or isinstance(L, float) or isinstance(L, int)): if indra_statement: return '"%s"' % L else: return L elif (isinstance(L, list) or isinstance(L, tuple)): s = [] for element in L: s += [self.lispify(element)] return '(' + ' '.join(s) + ')' elif isinstance(L, dict): s = [] for key in L: #print "key: " + key #print L[key] tmp_key = str(key) if not tmp_key.isalnum(): tmp_key = '"%s"' % tmp_key if key == "INDRA statement": s += [":{0} {1}".format(tmp_key, self.lispify(L[key], True))] else: s += [":{0} {1}".format(tmp_key, self.lispify(L[key], False))] return '(' + ' '.join(s) + ')' elif isinstance(L, unicode): if indra_statement: return '"%s"' % str(L) else: return str(L) else: return L
Add Lispify class to project__author__ = 'aarongary' class Lispify(): def __init__(self, obj): if(obj is None): raise ValueError("Please provide an object") self.obj = obj def to_lisp(self): if type(self.obj) is unicode: return self.lispify(str(self.obj)) else: return self.lispify(self.obj) def lispify(self, L, indra_statement=False): "Convert a Python object L to a lisp representation." if (isinstance(L, str) or isinstance(L, float) or isinstance(L, int)): if indra_statement: return '"%s"' % L else: return L elif (isinstance(L, list) or isinstance(L, tuple)): s = [] for element in L: s += [self.lispify(element)] return '(' + ' '.join(s) + ')' elif isinstance(L, dict): s = [] for key in L: #print "key: " + key #print L[key] tmp_key = str(key) if not tmp_key.isalnum(): tmp_key = '"%s"' % tmp_key if key == "INDRA statement": s += [":{0} {1}".format(tmp_key, self.lispify(L[key], True))] else: s += [":{0} {1}".format(tmp_key, self.lispify(L[key], False))] return '(' + ' '.join(s) + ')' elif isinstance(L, unicode): if indra_statement: return '"%s"' % str(L) else: return str(L) else: return L
<commit_before><commit_msg>Add Lispify class to project<commit_after>__author__ = 'aarongary' class Lispify(): def __init__(self, obj): if(obj is None): raise ValueError("Please provide an object") self.obj = obj def to_lisp(self): if type(self.obj) is unicode: return self.lispify(str(self.obj)) else: return self.lispify(self.obj) def lispify(self, L, indra_statement=False): "Convert a Python object L to a lisp representation." if (isinstance(L, str) or isinstance(L, float) or isinstance(L, int)): if indra_statement: return '"%s"' % L else: return L elif (isinstance(L, list) or isinstance(L, tuple)): s = [] for element in L: s += [self.lispify(element)] return '(' + ' '.join(s) + ')' elif isinstance(L, dict): s = [] for key in L: #print "key: " + key #print L[key] tmp_key = str(key) if not tmp_key.isalnum(): tmp_key = '"%s"' % tmp_key if key == "INDRA statement": s += [":{0} {1}".format(tmp_key, self.lispify(L[key], True))] else: s += [":{0} {1}".format(tmp_key, self.lispify(L[key], False))] return '(' + ' '.join(s) + ')' elif isinstance(L, unicode): if indra_statement: return '"%s"' % str(L) else: return str(L) else: return L
cef8e7a6e044405bc4b18b56351b20e1b27e17fe
glaciercmd/command_get_job_info_for_vault.py
glaciercmd/command_get_job_info_for_vault.py
import boto from boto.glacier.exceptions import UnexpectedHTTPResponseError class CommandGetJobInfoForVault(object): def execute(self, args, config): glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret')) try: vault = glacier_connection.get_vault(args[6]) except: vault = None if vault is None: print "Vault named '{}' does not exist.".format(args[6]) else: try: job = vault.get_job(args[3]) print "Job info:\n\tAction: {}\n\tArchive Id: {}\n\tArchive Size:{}\n\tCompleted: {}\n\tCompletion Date: {}\n\tCreation Date: {} \n\tInventory Size: {}\n\tDescription: {}\n\tJobID: {}\n\tSNSTopic: {}\n\tStatus Code: {}\n\tStatus Message: {}\n".format(job.action, job.archive_id, job.archive_size, job.completed, job.completion_date, job.creation_date, job.inventory_size, job.description, job.id, job.sns_topic, job.status_code, job.status_message) except UnexpectedHTTPResponseError as error: print "Job '{}' can not be found:\n\t {}".format(args[3], error) def accept(self, args): return len(args) >= 7 and args[0] == 'get' and args[1] == 'job' and args[2] == 'info' and args[4] == 'for' and args[5] == 'vault' def help(self): return "get job info <job id> for vault <vault name>" def command_init(): return CommandGetJobInfoForVault()
Add command to get job info
Add command to get job info
Python
mit
carsonmcdonald/glacier-cmd
Add command to get job info
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError

class CommandGetJobInfoForVault(object):
    def execute(self, args, config):
        glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))

        try:
            vault = glacier_connection.get_vault(args[6])
        except:
            vault = None

        if vault is None:
            print "Vault named '{}' does not exist.".format(args[6])
        else:
            try:
                job = vault.get_job(args[3])
                print "Job info:\n\tAction: {}\n\tArchive Id: {}\n\tArchive Size:{}\n\tCompleted: {}\n\tCompletion Date: {}\n\tCreation Date: {} \n\tInventory Size: {}\n\tDescription: {}\n\tJobID: {}\n\tSNSTopic: {}\n\tStatus Code: {}\n\tStatus Message: {}\n".format(job.action, job.archive_id, job.archive_size, job.completed, job.completion_date, job.creation_date, job.inventory_size, job.description, job.id, job.sns_topic, job.status_code, job.status_message)
            except UnexpectedHTTPResponseError as error:
                print "Job '{}' can not be found:\n\t {}".format(args[3], error)

    def accept(self, args):
        return len(args) >= 7 and args[0] == 'get' and args[1] == 'job' and args[2] == 'info' and args[4] == 'for' and args[5] == 'vault'

    def help(self):
        return "get job info <job id> for vault <vault name>"

def command_init():
    return CommandGetJobInfoForVault()
<commit_before><commit_msg>Add command to get job info<commit_after>
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError

class CommandGetJobInfoForVault(object):
    def execute(self, args, config):
        glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))

        try:
            vault = glacier_connection.get_vault(args[6])
        except:
            vault = None

        if vault is None:
            print "Vault named '{}' does not exist.".format(args[6])
        else:
            try:
                job = vault.get_job(args[3])
                print "Job info:\n\tAction: {}\n\tArchive Id: {}\n\tArchive Size:{}\n\tCompleted: {}\n\tCompletion Date: {}\n\tCreation Date: {} \n\tInventory Size: {}\n\tDescription: {}\n\tJobID: {}\n\tSNSTopic: {}\n\tStatus Code: {}\n\tStatus Message: {}\n".format(job.action, job.archive_id, job.archive_size, job.completed, job.completion_date, job.creation_date, job.inventory_size, job.description, job.id, job.sns_topic, job.status_code, job.status_message)
            except UnexpectedHTTPResponseError as error:
                print "Job '{}' can not be found:\n\t {}".format(args[3], error)

    def accept(self, args):
        return len(args) >= 7 and args[0] == 'get' and args[1] == 'job' and args[2] == 'info' and args[4] == 'for' and args[5] == 'vault'

    def help(self):
        return "get job info <job id> for vault <vault name>"

def command_init():
    return CommandGetJobInfoForVault()
Add command to get job info
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError

class CommandGetJobInfoForVault(object):
    def execute(self, args, config):
        glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))

        try:
            vault = glacier_connection.get_vault(args[6])
        except:
            vault = None

        if vault is None:
            print "Vault named '{}' does not exist.".format(args[6])
        else:
            try:
                job = vault.get_job(args[3])
                print "Job info:\n\tAction: {}\n\tArchive Id: {}\n\tArchive Size:{}\n\tCompleted: {}\n\tCompletion Date: {}\n\tCreation Date: {} \n\tInventory Size: {}\n\tDescription: {}\n\tJobID: {}\n\tSNSTopic: {}\n\tStatus Code: {}\n\tStatus Message: {}\n".format(job.action, job.archive_id, job.archive_size, job.completed, job.completion_date, job.creation_date, job.inventory_size, job.description, job.id, job.sns_topic, job.status_code, job.status_message)
            except UnexpectedHTTPResponseError as error:
                print "Job '{}' can not be found:\n\t {}".format(args[3], error)

    def accept(self, args):
        return len(args) >= 7 and args[0] == 'get' and args[1] == 'job' and args[2] == 'info' and args[4] == 'for' and args[5] == 'vault'

    def help(self):
        return "get job info <job id> for vault <vault name>"

def command_init():
    return CommandGetJobInfoForVault()
<commit_before><commit_msg>Add command to get job info<commit_after>
import boto
from boto.glacier.exceptions import UnexpectedHTTPResponseError

class CommandGetJobInfoForVault(object):
    def execute(self, args, config):
        glacier_connection = boto.connect_glacier(aws_access_key_id=config.get('configuration', 'aws_key'), aws_secret_access_key=config.get('configuration', 'aws_secret'))

        try:
            vault = glacier_connection.get_vault(args[6])
        except:
            vault = None

        if vault is None:
            print "Vault named '{}' does not exist.".format(args[6])
        else:
            try:
                job = vault.get_job(args[3])
                print "Job info:\n\tAction: {}\n\tArchive Id: {}\n\tArchive Size:{}\n\tCompleted: {}\n\tCompletion Date: {}\n\tCreation Date: {} \n\tInventory Size: {}\n\tDescription: {}\n\tJobID: {}\n\tSNSTopic: {}\n\tStatus Code: {}\n\tStatus Message: {}\n".format(job.action, job.archive_id, job.archive_size, job.completed, job.completion_date, job.creation_date, job.inventory_size, job.description, job.id, job.sns_topic, job.status_code, job.status_message)
            except UnexpectedHTTPResponseError as error:
                print "Job '{}' can not be found:\n\t {}".format(args[3], error)

    def accept(self, args):
        return len(args) >= 7 and args[0] == 'get' and args[1] == 'job' and args[2] == 'info' and args[4] == 'for' and args[5] == 'vault'

    def help(self):
        return "get job info <job id> for vault <vault name>"

def command_init():
    return CommandGetJobInfoForVault()
49868b7bac42ea49e37a20bf49c5d6131ef12a84
kufpybio/uniprotrestapi.py
kufpybio/uniprotrestapi.py
# http://www.uniprot.org/faq/28
"""This serves just the download of files. Parsing can be done
using Biopython:

from Bio import SeqIO
uniprot_entry = SeqIO.read(open("uniprot_files/A8Z556.xml"), "uniprot-xml")

"""

import os
import urllib.request
from restapi import RESTAPI

class UniprotRESTAPI(RESTAPI):

    def __init__(self, download_folder="uniprot_files"):
        self._download_folder = download_folder
        self._base_url = "http://www.uniprot.org/uniprot/"
        self._create_download_folder()

    def uniprot_protein_xml(self, uniprot_id):
        """e.g. Q5FJ41"""
        return(self._get_data("%s/%s.xml", "%s.xml", uniprot_id))
Add REST API for Uniprot
Add REST API for Uniprot
Python
isc
konrad/kufpybio
Add REST API for Uniprot
# http://www.uniprot.org/faq/28
"""This serves just the download of files. Parsing can be done
using Biopython:

from Bio import SeqIO
uniprot_entry = SeqIO.read(open("uniprot_files/A8Z556.xml"), "uniprot-xml")

"""

import os
import urllib.request
from restapi import RESTAPI

class UniprotRESTAPI(RESTAPI):

    def __init__(self, download_folder="uniprot_files"):
        self._download_folder = download_folder
        self._base_url = "http://www.uniprot.org/uniprot/"
        self._create_download_folder()

    def uniprot_protein_xml(self, uniprot_id):
        """e.g. Q5FJ41"""
        return(self._get_data("%s/%s.xml", "%s.xml", uniprot_id))
<commit_before><commit_msg>Add REST API for Uniprot<commit_after>
# http://www.uniprot.org/faq/28
"""This serves just the download of files. Parsing can be done
using Biopython:

from Bio import SeqIO
uniprot_entry = SeqIO.read(open("uniprot_files/A8Z556.xml"), "uniprot-xml")

"""

import os
import urllib.request
from restapi import RESTAPI

class UniprotRESTAPI(RESTAPI):

    def __init__(self, download_folder="uniprot_files"):
        self._download_folder = download_folder
        self._base_url = "http://www.uniprot.org/uniprot/"
        self._create_download_folder()

    def uniprot_protein_xml(self, uniprot_id):
        """e.g. Q5FJ41"""
        return(self._get_data("%s/%s.xml", "%s.xml", uniprot_id))
Add REST API for Uniprot
# http://www.uniprot.org/faq/28
"""This serves just the download of files. Parsing can be done
using Biopython:

from Bio import SeqIO
uniprot_entry = SeqIO.read(open("uniprot_files/A8Z556.xml"), "uniprot-xml")

"""

import os
import urllib.request
from restapi import RESTAPI

class UniprotRESTAPI(RESTAPI):

    def __init__(self, download_folder="uniprot_files"):
        self._download_folder = download_folder
        self._base_url = "http://www.uniprot.org/uniprot/"
        self._create_download_folder()

    def uniprot_protein_xml(self, uniprot_id):
        """e.g. Q5FJ41"""
        return(self._get_data("%s/%s.xml", "%s.xml", uniprot_id))
<commit_before><commit_msg>Add REST API for Uniprot<commit_after>
# http://www.uniprot.org/faq/28
"""This serves just the download of files. Parsing can be done
using Biopython:

from Bio import SeqIO
uniprot_entry = SeqIO.read(open("uniprot_files/A8Z556.xml"), "uniprot-xml")

"""

import os
import urllib.request
from restapi import RESTAPI

class UniprotRESTAPI(RESTAPI):

    def __init__(self, download_folder="uniprot_files"):
        self._download_folder = download_folder
        self._base_url = "http://www.uniprot.org/uniprot/"
        self._create_download_folder()

    def uniprot_protein_xml(self, uniprot_id):
        """e.g. Q5FJ41"""
        return(self._get_data("%s/%s.xml", "%s.xml", uniprot_id))
95846633b9206b62bee01e34f65e5c9b64bcf375
hw/ip/alert_handler/doc/reg_alert_handler.py
hw/ip/alert_handler/doc/reg_alert_handler.py
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Convert mako template to hjson register description
"""
import argparse
import sys
from io import StringIO

from mako.template import Template


def main():
    parser = argparse.ArgumentParser(prog="reg_alert_handler")
    parser.add_argument('input',
                        nargs='?',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='input template file')
    parser.add_argument('--n_alerts',
                        type=int,
                        help='Number of Alert Sources')
    parser.add_argument('--esc_cnt_dw',
                        type=int,
                        default=32,
                        help='Width of escalation counter')
    parser.add_argument('--accu_cnt_dw',
                        type=int,
                        default=16,
                        help='Width of accumulator')
    parser.add_argument('--lfsr_seed',
                        type=int,
                        default=2**31-1,
                        help='Width of accumulator')

    args = parser.parse_args()

    if (args.lfsr_seed & 0xFFFF_FFFF) == 0 or args.lfsr_seed > 2**32:
        parser.error("LFSR seed out of range or zero")

    # Determine output: if stdin then stdout if not then ??
    out = StringIO()

    reg_tpl = Template(args.input.read())
    out.write(
        reg_tpl.render(n_alerts=args.n_alerts,
                       n_classes=4,  # not fully parameterized yet
                       esc_cnt_dw=args.esc_cnt_dw,
                       accu_cnt_dw=args.accu_cnt_dw,
                       lfsr_seed=args.lfsr_seed,
                       reg_dw=32))  # 32bit regfile

    print(out.getvalue())
    out.close()


if __name__ == "__main__":
    main()
Add generator script for alert handler
[alert_handler] Add generator script for alert handler
Python
apache-2.0
lowRISC/opentitan,lowRISC/opentitan,lowRISC/opentitan,lowRISC/opentitan,lowRISC/opentitan,lowRISC/opentitan
[alert_handler] Add generator script for alert handler
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Convert mako template to hjson register description
"""
import argparse
import sys
from io import StringIO

from mako.template import Template


def main():
    parser = argparse.ArgumentParser(prog="reg_alert_handler")
    parser.add_argument('input',
                        nargs='?',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='input template file')
    parser.add_argument('--n_alerts',
                        type=int,
                        help='Number of Alert Sources')
    parser.add_argument('--esc_cnt_dw',
                        type=int,
                        default=32,
                        help='Width of escalation counter')
    parser.add_argument('--accu_cnt_dw',
                        type=int,
                        default=16,
                        help='Width of accumulator')
    parser.add_argument('--lfsr_seed',
                        type=int,
                        default=2**31-1,
                        help='Width of accumulator')

    args = parser.parse_args()

    if (args.lfsr_seed & 0xFFFF_FFFF) == 0 or args.lfsr_seed > 2**32:
        parser.error("LFSR seed out of range or zero")

    # Determine output: if stdin then stdout if not then ??
    out = StringIO()

    reg_tpl = Template(args.input.read())
    out.write(
        reg_tpl.render(n_alerts=args.n_alerts,
                       n_classes=4,  # not fully parameterized yet
                       esc_cnt_dw=args.esc_cnt_dw,
                       accu_cnt_dw=args.accu_cnt_dw,
                       lfsr_seed=args.lfsr_seed,
                       reg_dw=32))  # 32bit regfile

    print(out.getvalue())
    out.close()


if __name__ == "__main__":
    main()
<commit_before><commit_msg>[alert_handler] Add generator script for alert handler<commit_after>
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Convert mako template to hjson register description
"""
import argparse
import sys
from io import StringIO

from mako.template import Template


def main():
    parser = argparse.ArgumentParser(prog="reg_alert_handler")
    parser.add_argument('input',
                        nargs='?',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='input template file')
    parser.add_argument('--n_alerts',
                        type=int,
                        help='Number of Alert Sources')
    parser.add_argument('--esc_cnt_dw',
                        type=int,
                        default=32,
                        help='Width of escalation counter')
    parser.add_argument('--accu_cnt_dw',
                        type=int,
                        default=16,
                        help='Width of accumulator')
    parser.add_argument('--lfsr_seed',
                        type=int,
                        default=2**31-1,
                        help='Width of accumulator')

    args = parser.parse_args()

    if (args.lfsr_seed & 0xFFFF_FFFF) == 0 or args.lfsr_seed > 2**32:
        parser.error("LFSR seed out of range or zero")

    # Determine output: if stdin then stdout if not then ??
    out = StringIO()

    reg_tpl = Template(args.input.read())
    out.write(
        reg_tpl.render(n_alerts=args.n_alerts,
                       n_classes=4,  # not fully parameterized yet
                       esc_cnt_dw=args.esc_cnt_dw,
                       accu_cnt_dw=args.accu_cnt_dw,
                       lfsr_seed=args.lfsr_seed,
                       reg_dw=32))  # 32bit regfile

    print(out.getvalue())
    out.close()


if __name__ == "__main__":
    main()
[alert_handler] Add generator script for alert handler
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Convert mako template to hjson register description
"""
import argparse
import sys
from io import StringIO

from mako.template import Template


def main():
    parser = argparse.ArgumentParser(prog="reg_alert_handler")
    parser.add_argument('input',
                        nargs='?',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='input template file')
    parser.add_argument('--n_alerts',
                        type=int,
                        help='Number of Alert Sources')
    parser.add_argument('--esc_cnt_dw',
                        type=int,
                        default=32,
                        help='Width of escalation counter')
    parser.add_argument('--accu_cnt_dw',
                        type=int,
                        default=16,
                        help='Width of accumulator')
    parser.add_argument('--lfsr_seed',
                        type=int,
                        default=2**31-1,
                        help='Width of accumulator')

    args = parser.parse_args()

    if (args.lfsr_seed & 0xFFFF_FFFF) == 0 or args.lfsr_seed > 2**32:
        parser.error("LFSR seed out of range or zero")

    # Determine output: if stdin then stdout if not then ??
    out = StringIO()

    reg_tpl = Template(args.input.read())
    out.write(
        reg_tpl.render(n_alerts=args.n_alerts,
                       n_classes=4,  # not fully parameterized yet
                       esc_cnt_dw=args.esc_cnt_dw,
                       accu_cnt_dw=args.accu_cnt_dw,
                       lfsr_seed=args.lfsr_seed,
                       reg_dw=32))  # 32bit regfile

    print(out.getvalue())
    out.close()


if __name__ == "__main__":
    main()
<commit_before><commit_msg>[alert_handler] Add generator script for alert handler<commit_after>
#!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Convert mako template to hjson register description
"""
import argparse
import sys
from io import StringIO

from mako.template import Template


def main():
    parser = argparse.ArgumentParser(prog="reg_alert_handler")
    parser.add_argument('input',
                        nargs='?',
                        metavar='file',
                        type=argparse.FileType('r'),
                        default=sys.stdin,
                        help='input template file')
    parser.add_argument('--n_alerts',
                        type=int,
                        help='Number of Alert Sources')
    parser.add_argument('--esc_cnt_dw',
                        type=int,
                        default=32,
                        help='Width of escalation counter')
    parser.add_argument('--accu_cnt_dw',
                        type=int,
                        default=16,
                        help='Width of accumulator')
    parser.add_argument('--lfsr_seed',
                        type=int,
                        default=2**31-1,
                        help='Width of accumulator')

    args = parser.parse_args()

    if (args.lfsr_seed & 0xFFFF_FFFF) == 0 or args.lfsr_seed > 2**32:
        parser.error("LFSR seed out of range or zero")

    # Determine output: if stdin then stdout if not then ??
    out = StringIO()

    reg_tpl = Template(args.input.read())
    out.write(
        reg_tpl.render(n_alerts=args.n_alerts,
                       n_classes=4,  # not fully parameterized yet
                       esc_cnt_dw=args.esc_cnt_dw,
                       accu_cnt_dw=args.accu_cnt_dw,
                       lfsr_seed=args.lfsr_seed,
                       reg_dw=32))  # 32bit regfile

    print(out.getvalue())
    out.close()


if __name__ == "__main__":
    main()
b2cb6cd8461cfb9ec748754d7d314acc12221769
workshops/test/test_lookups.py
workshops/test/test_lookups.py
from django.core.urlresolvers import reverse

from .base import TestBase


class TestLookups(TestBase):
    """Test suite for django-selectable lookups."""

    def test_login_regression(self):
        """Make sure lookups are login-protected"""
        url_name = 'selectable-lookup'

        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 401  # unauthorized

        self._setUpUsersAndLogin()
        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 200
Add regression test for login-required lookups
Add regression test for login-required lookups

New test makes sure that we never accidentally disable @login_required on our lookups.
Python
mit
wking/swc-amy,wking/swc-amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,pbanaszkiewicz/amy,wking/swc-amy,vahtras/amy,shapiromatron/amy,vahtras/amy,swcarpentry/amy,swcarpentry/amy,swcarpentry/amy,pbanaszkiewicz/amy,pbanaszkiewicz/amy,shapiromatron/amy
Add regression test for login-required lookups

New test makes sure that we never accidentally disable @login_required on our lookups.
from django.core.urlresolvers import reverse

from .base import TestBase


class TestLookups(TestBase):
    """Test suite for django-selectable lookups."""

    def test_login_regression(self):
        """Make sure lookups are login-protected"""
        url_name = 'selectable-lookup'

        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 401  # unauthorized

        self._setUpUsersAndLogin()
        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 200
<commit_before><commit_msg>Add regression test for login-required lookups

New test makes sure that we never accidentally disable @login_required on our lookups.<commit_after>
from django.core.urlresolvers import reverse

from .base import TestBase


class TestLookups(TestBase):
    """Test suite for django-selectable lookups."""

    def test_login_regression(self):
        """Make sure lookups are login-protected"""
        url_name = 'selectable-lookup'

        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 401  # unauthorized

        self._setUpUsersAndLogin()
        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 200
Add regression test for login-required lookups

New test makes sure that we never accidentally disable @login_required on our lookups.
from django.core.urlresolvers import reverse

from .base import TestBase


class TestLookups(TestBase):
    """Test suite for django-selectable lookups."""

    def test_login_regression(self):
        """Make sure lookups are login-protected"""
        url_name = 'selectable-lookup'

        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 401  # unauthorized

        self._setUpUsersAndLogin()
        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 200
<commit_before><commit_msg>Add regression test for login-required lookups

New test makes sure that we never accidentally disable @login_required on our lookups.<commit_after>
from django.core.urlresolvers import reverse

from .base import TestBase


class TestLookups(TestBase):
    """Test suite for django-selectable lookups."""

    def test_login_regression(self):
        """Make sure lookups are login-protected"""
        url_name = 'selectable-lookup'

        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 401  # unauthorized

        self._setUpUsersAndLogin()
        rv = self.client.get(reverse(url_name, args=['workshops-sitelookup']))
        assert rv.status_code == 200
798d0e1cbb0b64452f8a9eaa46c7202ffb299193
cybox/test/objects/file_test.py
cybox/test/objects/file_test.py
import unittest

from cybox.objects.file_object import File
from cybox.test import round_trip
from cybox.test.objects import ObjectTestCase


class TestFile(unittest.TestCase, ObjectTestCase):
    object_type = "FileObjectType"
    klass = File

    def test_filepath_is_none(self):
        # This would throw an exception at one point. Should be fixed now.
        a = File.from_dict({'file_name': 'abcd.dll'})

    def test_round_trip(self):
        pass


if __name__ == "__main__":
    unittest.main()
Add a test related to creating File objects
Add a test related to creating File objects

This tests a bug which was present as of 7666b90ed657fa550283bfceefb938f27f7372b5 but was fixed in 0ba7cdb9bac25bac864d87b52da1ccb6b1e94d9e.
Python
bsd-3-clause
CybOXProject/python-cybox
Add a test related to creating File objects

This tests a bug which was present as of 7666b90ed657fa550283bfceefb938f27f7372b5 but was fixed in 0ba7cdb9bac25bac864d87b52da1ccb6b1e94d9e.
import unittest

from cybox.objects.file_object import File
from cybox.test import round_trip
from cybox.test.objects import ObjectTestCase


class TestFile(unittest.TestCase, ObjectTestCase):
    object_type = "FileObjectType"
    klass = File

    def test_filepath_is_none(self):
        # This would throw an exception at one point. Should be fixed now.
        a = File.from_dict({'file_name': 'abcd.dll'})

    def test_round_trip(self):
        pass


if __name__ == "__main__":
    unittest.main()
<commit_before><commit_msg>Add a test related to creating File objects

This tests a bug which was present as of 7666b90ed657fa550283bfceefb938f27f7372b5 but was fixed in 0ba7cdb9bac25bac864d87b52da1ccb6b1e94d9e.<commit_after>
import unittest

from cybox.objects.file_object import File
from cybox.test import round_trip
from cybox.test.objects import ObjectTestCase


class TestFile(unittest.TestCase, ObjectTestCase):
    object_type = "FileObjectType"
    klass = File

    def test_filepath_is_none(self):
        # This would throw an exception at one point. Should be fixed now.
        a = File.from_dict({'file_name': 'abcd.dll'})

    def test_round_trip(self):
        pass


if __name__ == "__main__":
    unittest.main()
Add a test related to creating File objects

This tests a bug which was present as of 7666b90ed657fa550283bfceefb938f27f7372b5 but was fixed in 0ba7cdb9bac25bac864d87b52da1ccb6b1e94d9e.
import unittest

from cybox.objects.file_object import File
from cybox.test import round_trip
from cybox.test.objects import ObjectTestCase


class TestFile(unittest.TestCase, ObjectTestCase):
    object_type = "FileObjectType"
    klass = File

    def test_filepath_is_none(self):
        # This would throw an exception at one point. Should be fixed now.
        a = File.from_dict({'file_name': 'abcd.dll'})

    def test_round_trip(self):
        pass


if __name__ == "__main__":
    unittest.main()
<commit_before><commit_msg>Add a test related to creating File objects

This tests a bug which was present as of 7666b90ed657fa550283bfceefb938f27f7372b5 but was fixed in 0ba7cdb9bac25bac864d87b52da1ccb6b1e94d9e.<commit_after>
import unittest

from cybox.objects.file_object import File
from cybox.test import round_trip
from cybox.test.objects import ObjectTestCase


class TestFile(unittest.TestCase, ObjectTestCase):
    object_type = "FileObjectType"
    klass = File

    def test_filepath_is_none(self):
        # This would throw an exception at one point. Should be fixed now.
        a = File.from_dict({'file_name': 'abcd.dll'})

    def test_round_trip(self):
        pass


if __name__ == "__main__":
    unittest.main()
4a2d92c3c34ce442ba8dde00fe89e7f7ad125e05
Modules/Biophotonics/python/iMC/msi/test/test_tiffwriter.py
Modules/Biophotonics/python/iMC/msi/test/test_tiffwriter.py
import unittest
import os

from msi.io.tiffwriter import TiffWriter
from msi.io.tiffreader import TiffReader
from msi.test import helpers


class TestTiffWriter(unittest.TestCase):

    def setUp(self):
        # setup file and the path where it shall be written to
        self.msi = helpers.getFakeMsi()
        self.msi.set_image(self.msi.get_image())
        self.fileUriToWrite = os.path.join(os.getcwd(), "testfiles")

    def tearDown(self):
        # remove the hopefully written files
        folder, file_prefix = os.path.split(self.fileUriToWrite)
        image_files = [f for f in os.listdir(folder) if
                       os.path.isfile(os.path.join(folder, f))]
        image_files = [f for f in image_files if f.startswith(file_prefix)]
        # expand to full path
        image_files = [os.path.join(folder, f) for f in image_files]
        for f in image_files:
            os.remove(f)

    def test_imageWriterCreatesFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)

    def test_imageWriterCreatesCorrectFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)
        reader = TiffReader(shift_bits=0)
        msi = reader.read(self.fileUriToWrite)
        self.assertTrue(msi == helpers.getFakeMsi(),
                        "image correctly written and read")
Add a test for the new tiff writer.
Add a test for the new tiff writer.
Python
bsd-3-clause
RabadanLab/MITKats,RabadanLab/MITKats,fmilano/mitk,iwegner/MITK,iwegner/MITK,RabadanLab/MITKats,fmilano/mitk,MITK/MITK,iwegner/MITK,MITK/MITK,iwegner/MITK,fmilano/mitk,MITK/MITK,fmilano/mitk,fmilano/mitk,MITK/MITK,RabadanLab/MITKats,MITK/MITK,fmilano/mitk,RabadanLab/MITKats,iwegner/MITK,MITK/MITK,iwegner/MITK,fmilano/mitk,RabadanLab/MITKats
Add a test for the new tiff writer.
import unittest
import os

from msi.io.tiffwriter import TiffWriter
from msi.io.tiffreader import TiffReader
from msi.test import helpers


class TestTiffWriter(unittest.TestCase):

    def setUp(self):
        # setup file and the path where it shall be written to
        self.msi = helpers.getFakeMsi()
        self.msi.set_image(self.msi.get_image())
        self.fileUriToWrite = os.path.join(os.getcwd(), "testfiles")

    def tearDown(self):
        # remove the hopefully written files
        folder, file_prefix = os.path.split(self.fileUriToWrite)
        image_files = [f for f in os.listdir(folder) if
                       os.path.isfile(os.path.join(folder, f))]
        image_files = [f for f in image_files if f.startswith(file_prefix)]
        # expand to full path
        image_files = [os.path.join(folder, f) for f in image_files]
        for f in image_files:
            os.remove(f)

    def test_imageWriterCreatesFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)

    def test_imageWriterCreatesCorrectFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)
        reader = TiffReader(shift_bits=0)
        msi = reader.read(self.fileUriToWrite)
        self.assertTrue(msi == helpers.getFakeMsi(),
                        "image correctly written and read")
<commit_before><commit_msg>Add a test for the new tiff writer.<commit_after>
import unittest
import os

from msi.io.tiffwriter import TiffWriter
from msi.io.tiffreader import TiffReader
from msi.test import helpers


class TestTiffWriter(unittest.TestCase):

    def setUp(self):
        # setup file and the path where it shall be written to
        self.msi = helpers.getFakeMsi()
        self.msi.set_image(self.msi.get_image())
        self.fileUriToWrite = os.path.join(os.getcwd(), "testfiles")

    def tearDown(self):
        # remove the hopefully written files
        folder, file_prefix = os.path.split(self.fileUriToWrite)
        image_files = [f for f in os.listdir(folder) if
                       os.path.isfile(os.path.join(folder, f))]
        image_files = [f for f in image_files if f.startswith(file_prefix)]
        # expand to full path
        image_files = [os.path.join(folder, f) for f in image_files]
        for f in image_files:
            os.remove(f)

    def test_imageWriterCreatesFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)

    def test_imageWriterCreatesCorrectFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)
        reader = TiffReader(shift_bits=0)
        msi = reader.read(self.fileUriToWrite)
        self.assertTrue(msi == helpers.getFakeMsi(),
                        "image correctly written and read")
Add a test for the new tiff writer.
import unittest
import os

from msi.io.tiffwriter import TiffWriter
from msi.io.tiffreader import TiffReader
from msi.test import helpers


class TestTiffWriter(unittest.TestCase):

    def setUp(self):
        # setup file and the path where it shall be written to
        self.msi = helpers.getFakeMsi()
        self.msi.set_image(self.msi.get_image())
        self.fileUriToWrite = os.path.join(os.getcwd(), "testfiles")

    def tearDown(self):
        # remove the hopefully written files
        folder, file_prefix = os.path.split(self.fileUriToWrite)
        image_files = [f for f in os.listdir(folder) if
                       os.path.isfile(os.path.join(folder, f))]
        image_files = [f for f in image_files if f.startswith(file_prefix)]
        # expand to full path
        image_files = [os.path.join(folder, f) for f in image_files]
        for f in image_files:
            os.remove(f)

    def test_imageWriterCreatesFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)

    def test_imageWriterCreatesCorrectFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)
        reader = TiffReader(shift_bits=0)
        msi = reader.read(self.fileUriToWrite)
        self.assertTrue(msi == helpers.getFakeMsi(),
                        "image correctly written and read")
<commit_before><commit_msg>Add a test for the new tiff writer.<commit_after>
import unittest
import os

from msi.io.tiffwriter import TiffWriter
from msi.io.tiffreader import TiffReader
from msi.test import helpers


class TestTiffWriter(unittest.TestCase):

    def setUp(self):
        # setup file and the path where it shall be written to
        self.msi = helpers.getFakeMsi()
        self.msi.set_image(self.msi.get_image())
        self.fileUriToWrite = os.path.join(os.getcwd(), "testfiles")

    def tearDown(self):
        # remove the hopefully written files
        folder, file_prefix = os.path.split(self.fileUriToWrite)
        image_files = [f for f in os.listdir(folder) if
                       os.path.isfile(os.path.join(folder, f))]
        image_files = [f for f in image_files if f.startswith(file_prefix)]
        # expand to full path
        image_files = [os.path.join(folder, f) for f in image_files]
        for f in image_files:
            os.remove(f)

    def test_imageWriterCreatesFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)

    def test_imageWriterCreatesCorrectFile(self):
        writer = TiffWriter(self.msi, convert_to_nm=False)
        writer.write(self.fileUriToWrite)
        reader = TiffReader(shift_bits=0)
        msi = reader.read(self.fileUriToWrite)
        self.assertTrue(msi == helpers.getFakeMsi(),
                        "image correctly written and read")
18c66e780bf8ae6ba4a9f5aa13720a1370c8d7a3
plugins/Tools/SelectionTool/SelectionPass.py
plugins/Tools/SelectionTool/SelectionPass.py
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Resources import Resources
from UM.Application import Application
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator

from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL

class CompositePass(RenderPass):
    def __init__(self, name, width, height):
        super().__init__(name, width, height)

        self._shader = OpenGL.getInstance().createMaterial(Resources.getPath(Resources.Shaders, "composite.shader"))
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def renderContents(self):
        for node in DepthFirstIterator(self._scene.getRoot()):
            if node.isSelectable() and node.getMeshData():
                self.renderNode(node)

    def renderOutput(self):
        self._shader.bind()

        texture_unit = 0
        for render_pass in renderer.getRenderPasses():
            self._gl.glActiveTexture(texture_unit)
            self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
            texture_unit += 1

        self._shader.setUniformValue("u_layer_count", texture_unit + 1)
        self._shader.setUniformValueArray("u_layers", [range(0, texture_unit)], texture_unit + 1)

        self.renderQuad()

        self._shader.release()
Add basic selection render pass for rendering the selection image
Add basic selection render pass for rendering the selection image
Python
agpl-3.0
onitake/Uranium,onitake/Uranium
Add basic selection render pass for rendering the selection image
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Resources import Resources
from UM.Application import Application
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator

from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL

class CompositePass(RenderPass):
    def __init__(self, name, width, height):
        super().__init__(name, width, height)

        self._shader = OpenGL.getInstance().createMaterial(Resources.getPath(Resources.Shaders, "composite.shader"))
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def renderContents(self):
        for node in DepthFirstIterator(self._scene.getRoot()):
            if node.isSelectable() and node.getMeshData():
                self.renderNode(node)

    def renderOutput(self):
        self._shader.bind()

        texture_unit = 0
        for render_pass in renderer.getRenderPasses():
            self._gl.glActiveTexture(texture_unit)
            self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
            texture_unit += 1

        self._shader.setUniformValue("u_layer_count", texture_unit + 1)
        self._shader.setUniformValueArray("u_layers", [range(0, texture_unit)], texture_unit + 1)

        self.renderQuad()

        self._shader.release()
<commit_before><commit_msg>Add basic selection render pass for rendering the selection image<commit_after>
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Resources import Resources
from UM.Application import Application
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator

from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL

class CompositePass(RenderPass):
    def __init__(self, name, width, height):
        super().__init__(name, width, height)

        self._shader = OpenGL.getInstance().createMaterial(Resources.getPath(Resources.Shaders, "composite.shader"))
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def renderContents(self):
        for node in DepthFirstIterator(self._scene.getRoot()):
            if node.isSelectable() and node.getMeshData():
                self.renderNode(node)

    def renderOutput(self):
        self._shader.bind()

        texture_unit = 0
        for render_pass in renderer.getRenderPasses():
            self._gl.glActiveTexture(texture_unit)
            self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
            texture_unit += 1

        self._shader.setUniformValue("u_layer_count", texture_unit + 1)
        self._shader.setUniformValueArray("u_layers", [range(0, texture_unit)], texture_unit + 1)

        self.renderQuad()

        self._shader.release()
Add basic selection render pass for rendering the selection image
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Resources import Resources
from UM.Application import Application
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator

from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL

class CompositePass(RenderPass):
    def __init__(self, name, width, height):
        super().__init__(name, width, height)

        self._shader = OpenGL.getInstance().createMaterial(Resources.getPath(Resources.Shaders, "composite.shader"))
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def renderContents(self):
        for node in DepthFirstIterator(self._scene.getRoot()):
            if node.isSelectable() and node.getMeshData():
                self.renderNode(node)

    def renderOutput(self):
        self._shader.bind()

        texture_unit = 0
        for render_pass in renderer.getRenderPasses():
            self._gl.glActiveTexture(texture_unit)
            self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
            texture_unit += 1

        self._shader.setUniformValue("u_layer_count", texture_unit + 1)
        self._shader.setUniformValueArray("u_layers", [range(0, texture_unit)], texture_unit + 1)

        self.renderQuad()

        self._shader.release()
<commit_before><commit_msg>Add basic selection render pass for rendering the selection image<commit_after>
# Copyright (c) 2015 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.

from UM.Resources import Resources
from UM.Application import Application
from UM.Scene.Iterator.DepthFirstIterator import DepthFirstIterator

from UM.View.RenderPass import RenderPass
from UM.View.GL.OpenGL import OpenGL

class CompositePass(RenderPass):
    def __init__(self, name, width, height):
        super().__init__(name, width, height)

        self._shader = OpenGL.getInstance().createMaterial(Resources.getPath(Resources.Shaders, "composite.shader"))
        self._gl = OpenGL.getInstance().getBindingsObject()
        self._scene = Application.getInstance().getController().getScene()

    def renderContents(self):
        for node in DepthFirstIterator(self._scene.getRoot()):
            if node.isSelectable() and node.getMeshData():
                self.renderNode(node)

    def renderOutput(self):
        self._shader.bind()

        texture_unit = 0
        for render_pass in renderer.getRenderPasses():
            self._gl.glActiveTexture(texture_unit)
            self._gl.glBindTexture(self._gl.GL_TEXTURE_2D, render_pass.getTextureId())
            texture_unit += 1

        self._shader.setUniformValue("u_layer_count", texture_unit + 1)
        self._shader.setUniformValueArray("u_layers", [range(0, texture_unit)], texture_unit + 1)

        self.renderQuad()

        self._shader.release()
5cfbf23ff88a2d028fdd852adc735263c060f4eb
inet/inet.py
inet/inet.py
# -*- coding: utf-8 -*-
import csv
import os
from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
Add read data file functionality to Inet class init
Add read data file functionality to Inet class init
Python
mit
nestauk/inet
Add read data file functionality to Inet class init
# -*- coding: utf-8 -*-
import csv
import os
from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
<commit_before><commit_msg>Add read data file functionality to Inet class init<commit_after>
# -*- coding: utf-8 -*-
import csv
import os
from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
Add read data file functionality to Inet class init
# -*- coding: utf-8 -*-
import csv
import os
from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
<commit_before><commit_msg>Add read data file functionality to Inet class init<commit_after>
# -*- coding: utf-8 -*-
import csv
import os
from collections import namedtuple


class Inet():
    """Inet class"""

    def __init__(self, data_file=None):
        # Naive check for file type based on extension
        # First check filepath is passed as a parameter
        if data_file is not None:
            # Then split off the extension using os
            ext = os.path.splitext(data_file)[-1].lower()
            # then check ends with .csv or .json
            if ext == '.csv':
                self.rows = []
                with open(data_file) as f:
                    f_csv = csv.reader(f)
                    headings = next(f_csv)
                    Row = namedtuple('Row', headings)
                    for r in f_csv:
                        row = Row(*r)
                        self.rows.append(row)
            else:
                raise TypeError("Input file must be of type .csv")
        else:
            raise AttributeError("No data_file path specified as a "
                                 "parameter to Inet object")
1a1d390ce80ffe3fea7e1e132090f38072f38117
performance-test.py
performance-test.py
#!/usr/bin/env python
import time
import logging
from ina219 import INA219

SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
READS = 100

ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)


def init():
    ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)


def read():
    for x in range(0, READS):
        v = ina.voltage()


if __name__ == "__main__":
    init()
    start = time.time()
    read()
    finish = time.time()
    elapsed = (finish - start) * 1000000
    print("Read time (average over %d reads): %d microseconds" %
          (READS, int(elapsed / READS)))
Add performance test python file
Add performance test python file
Python
mit
chrisb2/pi_ina219
Add performance test python file
#!/usr/bin/env python
import time
import logging
from ina219 import INA219

SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
READS = 100

ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)


def init():
    ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)


def read():
    for x in range(0, READS):
        v = ina.voltage()


if __name__ == "__main__":
    init()
    start = time.time()
    read()
    finish = time.time()
    elapsed = (finish - start) * 1000000
    print("Read time (average over %d reads): %d microseconds" %
          (READS, int(elapsed / READS)))
<commit_before><commit_msg>Add performance test python file<commit_after>
#!/usr/bin/env python
import time
import logging
from ina219 import INA219

SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
READS = 100

ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)


def init():
    ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)


def read():
    for x in range(0, READS):
        v = ina.voltage()


if __name__ == "__main__":
    init()
    start = time.time()
    read()
    finish = time.time()
    elapsed = (finish - start) * 1000000
    print("Read time (average over %d reads): %d microseconds" %
          (READS, int(elapsed / READS)))
Add performance test python file
#!/usr/bin/env python
import time
import logging
from ina219 import INA219

SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
READS = 100

ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)


def init():
    ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)


def read():
    for x in range(0, READS):
        v = ina.voltage()


if __name__ == "__main__":
    init()
    start = time.time()
    read()
    finish = time.time()
    elapsed = (finish - start) * 1000000
    print("Read time (average over %d reads): %d microseconds" %
          (READS, int(elapsed / READS)))
<commit_before><commit_msg>Add performance test python file<commit_after>
#!/usr/bin/env python
import time
import logging
from ina219 import INA219

SHUNT_OHMS = 0.1
MAX_EXPECTED_AMPS = 0.2
READS = 100

ina = INA219(SHUNT_OHMS, MAX_EXPECTED_AMPS, log_level=logging.INFO)


def init():
    ina.configure(ina.RANGE_16V, ina.GAIN_AUTO)


def read():
    for x in range(0, READS):
        v = ina.voltage()


if __name__ == "__main__":
    init()
    start = time.time()
    read()
    finish = time.time()
    elapsed = (finish - start) * 1000000
    print("Read time (average over %d reads): %d microseconds" %
          (READS, int(elapsed / READS)))
f408346e69f643e603f279c2581fad8c99962b11
service_registry_cli/commands/configuration/remove.py
service_registry_cli/commands/configuration/remove.py
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from service_registry_cli.utils import BaseCommand, get_client


class RemoveCommand(BaseCommand):
    """
    Remove a configuration value.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(RemoveCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--id', dest='id')
        return parser

    def take_action(self, parsed_args):
        client = get_client(parsed_args)
        configuration_id = parsed_args.id

        client.configuration.remove(configuration_id=configuration_id)
        self.app.stdout.write('Configuration value "%s" has been removed\n' %
                              (configuration_id))
Add a command for removing a configuration value.
Add a command for removing a configuration value.
Python
apache-2.0
racker/python-service-registry-cli,racker/python-service-registry-cli
Add a command for removing a configuration value.
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from service_registry_cli.utils import BaseCommand, get_client


class RemoveCommand(BaseCommand):
    """
    Remove a configuration value.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(RemoveCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--id', dest='id')
        return parser

    def take_action(self, parsed_args):
        client = get_client(parsed_args)
        configuration_id = parsed_args.id

        client.configuration.remove(configuration_id=configuration_id)
        self.app.stdout.write('Configuration value "%s" has been removed\n' %
                              (configuration_id))
<commit_before><commit_msg>Add a command for removing a configuration value.<commit_after>
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from service_registry_cli.utils import BaseCommand, get_client


class RemoveCommand(BaseCommand):
    """
    Remove a configuration value.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(RemoveCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--id', dest='id')
        return parser

    def take_action(self, parsed_args):
        client = get_client(parsed_args)
        configuration_id = parsed_args.id

        client.configuration.remove(configuration_id=configuration_id)
        self.app.stdout.write('Configuration value "%s" has been removed\n' %
                              (configuration_id))
Add a command for removing a configuration value.
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from service_registry_cli.utils import BaseCommand, get_client


class RemoveCommand(BaseCommand):
    """
    Remove a configuration value.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(RemoveCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--id', dest='id')
        return parser

    def take_action(self, parsed_args):
        client = get_client(parsed_args)
        configuration_id = parsed_args.id

        client.configuration.remove(configuration_id=configuration_id)
        self.app.stdout.write('Configuration value "%s" has been removed\n' %
                              (configuration_id))
<commit_before><commit_msg>Add a command for removing a configuration value.<commit_after>
# Copyright 2012 Rackspace
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging

from service_registry_cli.utils import BaseCommand, get_client


class RemoveCommand(BaseCommand):
    """
    Remove a configuration value.
    """
    log = logging.getLogger(__name__)

    def get_parser(self, prog_name):
        parser = super(RemoveCommand, self).get_parser(prog_name=prog_name)
        parser.add_argument('--id', dest='id')
        return parser

    def take_action(self, parsed_args):
        client = get_client(parsed_args)
        configuration_id = parsed_args.id

        client.configuration.remove(configuration_id=configuration_id)
        self.app.stdout.write('Configuration value "%s" has been removed\n' %
                              (configuration_id))
929849f09b62a06d8124d0f9f90d866911700718
sandcats/trivial_tests.py
sandcats/trivial_tests.py
import requests


def register_asheesh():
    return requests.post(
        'http://localhost:3000/register',
        {'rawHostname': 'asheesh',
         'email': 'asheesh@asheesh.org',
         'pubkey': open('snakeoil-sample-certs/ssl-cert-snakeoil.pubkey').read()},
    )
Add trivial semi-manual test suite
Add trivial semi-manual test suite
Python
apache-2.0
sandstorm-io/sandcats,sandstorm-io/sandcats,sandstorm-io/sandcats,sandstorm-io/sandcats
Add trivial semi-manual test suite
import requests


def register_asheesh():
    return requests.post(
        'http://localhost:3000/register',
        {'rawHostname': 'asheesh',
         'email': 'asheesh@asheesh.org',
         'pubkey': open('snakeoil-sample-certs/ssl-cert-snakeoil.pubkey').read()},
    )
<commit_before><commit_msg>Add trivial semi-manual test suite<commit_after>
import requests


def register_asheesh():
    return requests.post(
        'http://localhost:3000/register',
        {'rawHostname': 'asheesh',
         'email': 'asheesh@asheesh.org',
         'pubkey': open('snakeoil-sample-certs/ssl-cert-snakeoil.pubkey').read()},
    )
Add trivial semi-manual test suite
import requests


def register_asheesh():
    return requests.post(
        'http://localhost:3000/register',
        {'rawHostname': 'asheesh',
         'email': 'asheesh@asheesh.org',
         'pubkey': open('snakeoil-sample-certs/ssl-cert-snakeoil.pubkey').read()},
    )
<commit_before><commit_msg>Add trivial semi-manual test suite<commit_after>
import requests


def register_asheesh():
    return requests.post(
        'http://localhost:3000/register',
        {'rawHostname': 'asheesh',
         'email': 'asheesh@asheesh.org',
         'pubkey': open('snakeoil-sample-certs/ssl-cert-snakeoil.pubkey').read()},
    )
d67e7a17097ed08069a1e21fc09e0863c8bd56a8
analysis/multinomial_nb.py
analysis/multinomial_nb.py
# import dataset
import json

INPUT_FILE = './analysis/input/dev_posts.json'
tweets = []

with open(INPUT_FILE, 'r') as f:
    for line in f:
        t = json.loads(line)
        tweets.append(t['body'])

print('Total number of tweets: {}'.format(len(tweets)))

# import results
import numpy as np

TARGET_FILE = './analysis/input/test_results.csv'
f = open(TARGET_FILE)
target = np.loadtxt(f)

# split train/test 60/40
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.4, random_state=1)
print('Train: {},{}'.format(len(X_train), y_train.shape))
print('Test: {},{}'.format(len(X_test), y_test.shape))

# train
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB())])
text_clf = text_clf.fit(X_train, y_train)

# predict
predicted = text_clf.predict(X_test)
print('Accuracy: {}'.format(np.mean(predicted == y_test)))

X_ones = np.array(X_test)[y_test == 1]
predicted_positive = text_clf.predict(X_ones)
print('Positive accuracy: {}'.format(np.mean(predicted_positive == 1)))

X_ones = np.array(X_test)[y_test == -1]
predicted_negative = text_clf.predict(X_ones)
print('Negative accuracy: {}'.format(np.mean(predicted_negative == -1)))

# metrics
from sklearn import metrics

predicted = text_clf.predict(X_test)
print(metrics.classification_report(y_test, predicted))
print('Confusion matrix: \n{}'.format(metrics.confusion_matrix(y_test, predicted)))
Add baseline naive bayes test
Add baseline naive bayes test
Python
apache-2.0
chuajiesheng/twitter-sentiment-analysis
Add baseline naive bayes test
# import dataset
import json

INPUT_FILE = './analysis/input/dev_posts.json'
tweets = []

with open(INPUT_FILE, 'r') as f:
    for line in f:
        t = json.loads(line)
        tweets.append(t['body'])

print('Total number of tweets: {}'.format(len(tweets)))

# import results
import numpy as np

TARGET_FILE = './analysis/input/test_results.csv'
f = open(TARGET_FILE)
target = np.loadtxt(f)

# split train/test 60/40
from sklearn.model_selection import train_test_split

X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.4, random_state=1)
print('Train: {},{}'.format(len(X_train), y_train.shape))
print('Test: {},{}'.format(len(X_test), y_test.shape))

# train
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

text_clf = Pipeline([('vect', CountVectorizer()),
                     ('tfidf', TfidfTransformer()),
                     ('clf', MultinomialNB())])
text_clf = text_clf.fit(X_train, y_train)

# predict
predicted = text_clf.predict(X_test)
print('Accuracy: {}'.format(np.mean(predicted == y_test)))

X_ones = np.array(X_test)[y_test == 1]
predicted_positive = text_clf.predict(X_ones)
print('Positive accuracy: {}'.format(np.mean(predicted_positive == 1)))

X_ones = np.array(X_test)[y_test == -1]
predicted_negative = text_clf.predict(X_ones)
print('Negative accuracy: {}'.format(np.mean(predicted_negative == -1)))

# metrics
from sklearn import metrics

predicted = text_clf.predict(X_test)
print(metrics.classification_report(y_test, predicted))
print('Confusion matrix: \n{}'.format(metrics.confusion_matrix(y_test, predicted)))
<commit_before><commit_msg>Add baseline naive bayes test<commit_after>
# import dataset import json INPUT_FILE = './analysis/input/dev_posts.json' tweets = [] with open(INPUT_FILE, 'r') as f: for line in f: t = json.loads(line) tweets.append(t['body']) print('Total number of tweets: {}'.format(len(tweets))) # import results import numpy as np TARGET_FILE = './analysis/input/test_results.csv' f = open(TARGET_FILE) target = np.loadtxt(f) # split train/test 60/40 from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.4, random_state=1) print('Train: {},{}'.format(len(X_train), y_train.shape)) print('Test: {},{}'.format(len(X_test), y_test.shape)) # train from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())]) text_clf = text_clf.fit(X_train, y_train) # predict predicted = text_clf.predict(X_test) print('Accuracy: {}'.format(np.mean(predicted == y_test))) X_ones = np.array(X_test)[y_test == 1] predicted_positive = text_clf.predict(X_ones) print('Positive accuracy: {}'.format(np.mean(predicted_positive == 1))) X_ones = np.array(X_test)[y_test == -1] predicted_negative = text_clf.predict(X_ones) print('Negative accuracy: {}'.format(np.mean(predicted_negative == -1))) # metrics from sklearn import metrics predicted = text_clf.predict(X_test) print(metrics.classification_report(y_test, predicted)) print('Confusion matrix: \n{}'.format(metrics.confusion_matrix(y_test, predicted)))
Add baseline naive bayes test# import dataset import json INPUT_FILE = './analysis/input/dev_posts.json' tweets = [] with open(INPUT_FILE, 'r') as f: for line in f: t = json.loads(line) tweets.append(t['body']) print('Total number of tweets: {}'.format(len(tweets))) # import results import numpy as np TARGET_FILE = './analysis/input/test_results.csv' f = open(TARGET_FILE) target = np.loadtxt(f) # split train/test 60/40 from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.4, random_state=1) print('Train: {},{}'.format(len(X_train), y_train.shape)) print('Test: {},{}'.format(len(X_test), y_test.shape)) # train from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())]) text_clf = text_clf.fit(X_train, y_train) # predict predicted = text_clf.predict(X_test) print('Accuracy: {}'.format(np.mean(predicted == y_test))) X_ones = np.array(X_test)[y_test == 1] predicted_positive = text_clf.predict(X_ones) print('Positive accuracy: {}'.format(np.mean(predicted_positive == 1))) X_ones = np.array(X_test)[y_test == -1] predicted_negative = text_clf.predict(X_ones) print('Negative accuracy: {}'.format(np.mean(predicted_negative == -1))) # metrics from sklearn import metrics predicted = text_clf.predict(X_test) print(metrics.classification_report(y_test, predicted)) print('Confusion matrix: \n{}'.format(metrics.confusion_matrix(y_test, predicted)))
<commit_before><commit_msg>Add baseline naive bayes test<commit_after># import dataset import json INPUT_FILE = './analysis/input/dev_posts.json' tweets = [] with open(INPUT_FILE, 'r') as f: for line in f: t = json.loads(line) tweets.append(t['body']) print('Total number of tweets: {}'.format(len(tweets))) # import results import numpy as np TARGET_FILE = './analysis/input/test_results.csv' f = open(TARGET_FILE) target = np.loadtxt(f) # split train/test 60/40 from sklearn.model_selection import train_test_split X_train, X_test, y_train, y_test = train_test_split(tweets, target, test_size=0.4, random_state=1) print('Train: {},{}'.format(len(X_train), y_train.shape)) print('Test: {},{}'.format(len(X_test), y_test.shape)) # train from sklearn.feature_extraction.text import CountVectorizer from sklearn.feature_extraction.text import TfidfTransformer from sklearn.naive_bayes import MultinomialNB from sklearn.pipeline import Pipeline text_clf = Pipeline([('vect', CountVectorizer()), ('tfidf', TfidfTransformer()), ('clf', MultinomialNB())]) text_clf = text_clf.fit(X_train, y_train) # predict predicted = text_clf.predict(X_test) print('Accuracy: {}'.format(np.mean(predicted == y_test))) X_ones = np.array(X_test)[y_test == 1] predicted_positive = text_clf.predict(X_ones) print('Positive accuracy: {}'.format(np.mean(predicted_positive == 1))) X_ones = np.array(X_test)[y_test == -1] predicted_negative = text_clf.predict(X_ones) print('Negative accuracy: {}'.format(np.mean(predicted_negative == -1))) # metrics from sklearn import metrics predicted = text_clf.predict(X_test) print(metrics.classification_report(y_test, predicted)) print('Confusion matrix: \n{}'.format(metrics.confusion_matrix(y_test, predicted)))
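The record above chains CountVectorizer, TfidfTransformer and MultinomialNB into a single sklearn Pipeline, then scores the positive and negative classes separately by masking the held-out set on each label. A minimal runnable sketch of the same pattern — the four-document corpus and its labels are invented purely for illustration:

# Bag-of-words -> tf-idf -> multinomial Naive Bayes, as in the record.
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline

corpus = ['great product, love it', 'terrible, total waste of money',
          'really happy with this', 'awful experience, never again']
labels = np.array([1, -1, 1, -1])  # 1 = positive, -1 = negative

clf = Pipeline([('vect', CountVectorizer()),
                ('tfidf', TfidfTransformer()),
                ('clf', MultinomialNB())])
clf.fit(corpus, labels)

# Per-class accuracy, computed the same way as in the record:
# mask by true label, then check predictions against that label.
preds = clf.predict(corpus)
for label in (1, -1):
    mask = labels == label
    print(label, np.mean(preds[mask] == label))

Predicting on the training corpus here only keeps the sketch short; the record properly holds out 40% of the data with train_test_split before scoring.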
94e7c97b8fc22599ea92c5301bfeabf714de1635
github3/search/code.py
github3/search/code.py
from github3.models import GitHubCore from github3.repos import Repository class CodeSearchResult(GitHubCore): def __init__(self, data, session=None): super(CodeSearchResult, self).__init__(data, session) self._api = data.get('url') #: Filename the match occurs in self.name = data.get('name') #: Path in the repository to the file self.path = data.get('path') #: SHA in which the code can be found self.sha = data.get('sha') #: URL to the Git blob endpoint self.git_url = data.get('git_url') #: URL to the HTML view of the blob self.html_url = data.get('html_url') #: Repository the code snippet belongs to self.repository = Repository(data.get('repository', {}), self) #: Score of the result self.score = data.get('score') #: Text matches self.text_matches = data.get('text_matches', [])
Add a class to encapsulate these results
Add a class to encapsulate these results
Python
bsd-3-clause
christophelec/github3.py,degustaf/github3.py,itsmemattchung/github3.py,agamdua/github3.py,h4ck3rm1k3/github3.py,jim-minter/github3.py,krxsky/github3.py,ueg1990/github3.py,sigmavirus24/github3.py,wbrefvem/github3.py,icio/github3.py,balloob/github3.py
Add a class to encapsulate these results
from github3.models import GitHubCore from github3.repos import Repository class CodeSearchResult(GitHubCore): def __init__(self, data, session=None): super(CodeSearchResult, self).__init__(data, session) self._api = data.get('url') #: Filename the match occurs in self.name = data.get('name') #: Path in the repository to the file self.path = data.get('path') #: SHA in which the code can be found self.sha = data.get('sha') #: URL to the Git blob endpoint self.git_url = data.get('git_url') #: URL to the HTML view of the blob self.html_url = data.get('html_url') #: Repository the code snippet belongs to self.repository = Repository(data.get('repository', {}), self) #: Score of the result self.score = data.get('score') #: Text matches self.text_matches = data.get('text_matches', [])
<commit_before><commit_msg>Add a class to encapsulate these results<commit_after>
from github3.models import GitHubCore from github3.repos import Repository class CodeSearchResult(GitHubCore): def __init__(self, data, session=None): super(CodeSearchResult, self).__init__(data, session) self._api = data.get('url') #: Filename the match occurs in self.name = data.get('name') #: Path in the repository to the file self.path = data.get('path') #: SHA in which the code can be found self.sha = data.get('sha') #: URL to the Git blob endpoint self.git_url = data.get('git_url') #: URL to the HTML view of the blob self.html_url = data.get('html_url') #: Repository the code snippet belongs to self.repository = Repository(data.get('repository', {}), self) #: Score of the result self.score = data.get('score') #: Text matches self.text_matches = data.get('text_matches', [])
Add a class to encapsulate these resultsfrom github3.models import GitHubCore from github3.repos import Repository class CodeSearchResult(GitHubCore): def __init__(self, data, session=None): super(CodeSearchResult, self).__init__(data, session) self._api = data.get('url') #: Filename the match occurs in self.name = data.get('name') #: Path in the repository to the file self.path = data.get('path') #: SHA in which the code can be found self.sha = data.get('sha') #: URL to the Git blob endpoint self.git_url = data.get('git_url') #: URL to the HTML view of the blob self.html_url = data.get('html_url') #: Repository the code snippet belongs to self.repository = Repository(data.get('repository', {}), self) #: Score of the result self.score = data.get('score') #: Text matches self.text_matches = data.get('text_matches', [])
<commit_before><commit_msg>Add a class to encapsulate these results<commit_after>from github3.models import GitHubCore from github3.repos import Repository class CodeSearchResult(GitHubCore): def __init__(self, data, session=None): super(CodeSearchResult, self).__init__(data, session) self._api = data.get('url') #: Filename the match occurs in self.name = data.get('name') #: Path in the repository to the file self.path = data.get('path') #: SHA in which the code can be found self.sha = data.get('sha') #: URL to the Git blob endpoint self.git_url = data.get('git_url') #: URL to the HTML view of the blob self.html_url = data.get('html_url') #: Repository the code snippet belongs to self.repository = Repository(data.get('repository', {}), self) #: Score of the result self.score = data.get('score') #: Text matches self.text_matches = data.get('text_matches', [])
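The class above is a thin attribute view over a search payload: each field is read with dict.get, so keys missing from the API response degrade to None (or an empty list for text_matches) rather than raising KeyError. That pattern in isolation, with an invented minimal payload:

# Attribute view over a result dict; absent keys become None, not errors.
# The payload below is made up for illustration.
class SearchResult(object):
    def __init__(self, data):
        self.name = data.get('name')
        self.path = data.get('path')
        self.score = data.get('score')
        self.text_matches = data.get('text_matches', [])

hit = SearchResult({'name': 'code.py',
                    'path': 'github3/search/code.py',
                    'score': 42.0})
print(hit.name, hit.score, hit.text_matches)  # code.py 42.0 []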
05471b5d1b5328af337877433327fd90987a7492
http_date.py
http_date.py
#!/usr/bin/env python3 import re months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() http_month_pattern = '(%s)' % '|'.join(months) http_time_pattern = '\d\d:\d\d:\d\d' http_date_pattern = re.compile(r'''^ \S+ ( ,\s (?P<day1>\d+) ( \s (?P<month1>{0}) \s (?P<year1>\d+) | - (?P<month2>{0}) - (?P<year2digit>\d+) ) \s (?P<time1>{1}) \s GMT | \s (?P<month3>{0}) \s+ (?P<day2>\d+) \s (?P<time2>{1}) \s (?P<year3>\d+) ) $'''.format(http_month_pattern, http_time_pattern), re.VERBOSE) def parse_http_date(date): ''' Parses a string in any of the three standard HTTP-date formats (e.g. HTTP Last-Modified header) and returns a UTC tuple. It might be simpler to use time.strptime against the three possible formats and see which one works, but the locale specific stuff that does worries me since this string is locale agnostic. Maybe I'm just over-paranoid? ''' import time import calendar match = http_date_pattern.match(date) if match is None: raise SyntaxError('Could not parse HTTP-date: %s' % date) match = match.groupdict() day = list(filter(None, (match['day1'], match['day2']))) assert(len(day) == 1) day = int(day[0]) month = list(filter(None, (match['month1'], match['month2'], match['month3']))) assert(len(month) == 1) month = months.index(month[0]) + 1 if match['year2digit']: year = int(match['year2digit']) cur_year = time.gmtime().tm_year if year > cur_year % 100 + 50: cur_year -= 50 year += cur_year // 100 * 100 else: year = list(filter(None, (match['year1'], match['year3']))) assert(len(year) == 1) year = int(year[0]) t = list(filter(None, (match['time1'], match['time2']))) assert(len(t) == 1) (hour, min, sec) = map(int, t[0].split(':')) return time.gmtime(calendar.timegm([year, month, day, hour, min, sec, 0, 0, 0]))
Add some python code to parse a HTTP-date
Add some python code to parse a HTTP-date
Python
mit
DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk,DarkStarSword/junk
Add some python code to parse a HTTP-date
#!/usr/bin/env python3 import re months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() http_month_pattern = '(%s)' % '|'.join(months) http_time_pattern = '\d\d:\d\d:\d\d' http_date_pattern = re.compile(r'''^ \S+ ( ,\s (?P<day1>\d+) ( \s (?P<month1>{0}) \s (?P<year1>\d+) | - (?P<month2>{0}) - (?P<year2digit>\d+) ) \s (?P<time1>{1}) \s GMT | \s (?P<month3>{0}) \s+ (?P<day2>\d+) \s (?P<time2>{1}) \s (?P<year3>\d+) ) $'''.format(http_month_pattern, http_time_pattern), re.VERBOSE) def parse_http_date(date): ''' Parses a string in any of the three standard HTTP-date formats (e.g. HTTP Last-Modified header) and returns a UTC tuple. It might be simpler to use time.strptime against the three possible formats and see which one works, but the locale specific stuff that does worries me since this string is locale agnostic. Maybe I'm just over-paranoid? ''' import time import calendar match = http_date_pattern.match(date) if match is None: raise SyntaxError('Could not parse HTTP-date: %s' % date) match = match.groupdict() day = list(filter(None, (match['day1'], match['day2']))) assert(len(day) == 1) day = int(day[0]) month = list(filter(None, (match['month1'], match['month2'], match['month3']))) assert(len(month) == 1) month = months.index(month[0]) + 1 if match['year2digit']: year = int(match['year2digit']) cur_year = time.gmtime().tm_year if year > cur_year % 100 + 50: cur_year -= 50 year += cur_year // 100 * 100 else: year = list(filter(None, (match['year1'], match['year3']))) assert(len(year) == 1) year = int(year[0]) t = list(filter(None, (match['time1'], match['time2']))) assert(len(t) == 1) (hour, min, sec) = map(int, t[0].split(':')) return time.gmtime(calendar.timegm([year, month, day, hour, min, sec, 0, 0, 0]))
<commit_before><commit_msg>Add some python code to parse a HTTP-date<commit_after>
#!/usr/bin/env python3 import re months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() http_month_pattern = '(%s)' % '|'.join(months) http_time_pattern = '\d\d:\d\d:\d\d' http_date_pattern = re.compile(r'''^ \S+ ( ,\s (?P<day1>\d+) ( \s (?P<month1>{0}) \s (?P<year1>\d+) | - (?P<month2>{0}) - (?P<year2digit>\d+) ) \s (?P<time1>{1}) \s GMT | \s (?P<month3>{0}) \s+ (?P<day2>\d+) \s (?P<time2>{1}) \s (?P<year3>\d+) ) $'''.format(http_month_pattern, http_time_pattern), re.VERBOSE) def parse_http_date(date): ''' Parses a string in any of the three standard HTTP-date formats (e.g. HTTP Last-Modified header) and returns a UTC tuple. It might be simpler to use time.strptime against the three possible formats and see which one works, but the locale specific stuff that does worries me since this string is locale agnostic. Maybe I'm just over-paranoid? ''' import time import calendar match = http_date_pattern.match(date) if match is None: raise SyntaxError('Could not parse HTTP-date: %s' % date) match = match.groupdict() day = list(filter(None, (match['day1'], match['day2']))) assert(len(day) == 1) day = int(day[0]) month = list(filter(None, (match['month1'], match['month2'], match['month3']))) assert(len(month) == 1) month = months.index(month[0]) + 1 if match['year2digit']: year = int(match['year2digit']) cur_year = time.gmtime().tm_year if year > cur_year % 100 + 50: cur_year -= 50 year += cur_year // 100 * 100 else: year = list(filter(None, (match['year1'], match['year3']))) assert(len(year) == 1) year = int(year[0]) t = list(filter(None, (match['time1'], match['time2']))) assert(len(t) == 1) (hour, min, sec) = map(int, t[0].split(':')) return time.gmtime(calendar.timegm([year, month, day, hour, min, sec, 0, 0, 0]))
Add some python code to parse a HTTP-date#!/usr/bin/env python3 import re months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() http_month_pattern = '(%s)' % '|'.join(months) http_time_pattern = '\d\d:\d\d:\d\d' http_date_pattern = re.compile(r'''^ \S+ ( ,\s (?P<day1>\d+) ( \s (?P<month1>{0}) \s (?P<year1>\d+) | - (?P<month2>{0}) - (?P<year2digit>\d+) ) \s (?P<time1>{1}) \s GMT | \s (?P<month3>{0}) \s+ (?P<day2>\d+) \s (?P<time2>{1}) \s (?P<year3>\d+) ) $'''.format(http_month_pattern, http_time_pattern), re.VERBOSE) def parse_http_date(date): ''' Parses a string in any of the three standard HTTP-date formats (e.g. HTTP Last-Modified header) and returns a UTC tuple. It might be simpler to use time.strptime against the three possible formats and see which one works, but the locale specific stuff that does worries me since this string is locale agnostic. Maybe I'm just over-paranoid? ''' import time import calendar match = http_date_pattern.match(date) if match is None: raise SyntaxError('Could not parse HTTP-date: %s' % date) match = match.groupdict() day = list(filter(None, (match['day1'], match['day2']))) assert(len(day) == 1) day = int(day[0]) month = list(filter(None, (match['month1'], match['month2'], match['month3']))) assert(len(month) == 1) month = months.index(month[0]) + 1 if match['year2digit']: year = int(match['year2digit']) cur_year = time.gmtime().tm_year if year > cur_year % 100 + 50: cur_year -= 50 year += cur_year // 100 * 100 else: year = list(filter(None, (match['year1'], match['year3']))) assert(len(year) == 1) year = int(year[0]) t = list(filter(None, (match['time1'], match['time2']))) assert(len(t) == 1) (hour, min, sec) = map(int, t[0].split(':')) return time.gmtime(calendar.timegm([year, month, day, hour, min, sec, 0, 0, 0]))
<commit_before><commit_msg>Add some python code to parse a HTTP-date<commit_after>#!/usr/bin/env python3 import re months = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split() http_month_pattern = '(%s)' % '|'.join(months) http_time_pattern = '\d\d:\d\d:\d\d' http_date_pattern = re.compile(r'''^ \S+ ( ,\s (?P<day1>\d+) ( \s (?P<month1>{0}) \s (?P<year1>\d+) | - (?P<month2>{0}) - (?P<year2digit>\d+) ) \s (?P<time1>{1}) \s GMT | \s (?P<month3>{0}) \s+ (?P<day2>\d+) \s (?P<time2>{1}) \s (?P<year3>\d+) ) $'''.format(http_month_pattern, http_time_pattern), re.VERBOSE) def parse_http_date(date): ''' Parses a string in any of the three standard HTTP-date formats (e.g. HTTP Last-Modified header) and returns a UTC tuple. It might be simpler to use time.strptime against the three possible formats and see which one works, but the locale specific stuff that does worries me since this string is locale agnostic. Maybe I'm just over-paranoid? ''' import time import calendar match = http_date_pattern.match(date) if match is None: raise SyntaxError('Could not parse HTTP-date: %s' % date) match = match.groupdict() day = list(filter(None, (match['day1'], match['day2']))) assert(len(day) == 1) day = int(day[0]) month = list(filter(None, (match['month1'], match['month2'], match['month3']))) assert(len(month) == 1) month = months.index(month[0]) + 1 if match['year2digit']: year = int(match['year2digit']) cur_year = time.gmtime().tm_year if year > cur_year % 100 + 50: cur_year -= 50 year += cur_year // 100 * 100 else: year = list(filter(None, (match['year1'], match['year3']))) assert(len(year) == 1) year = int(year[0]) t = list(filter(None, (match['time1'], match['time2']))) assert(len(t) == 1) (hour, min, sec) = map(int, t[0].split(':')) return time.gmtime(calendar.timegm([year, month, day, hour, min, sec, 0, 0, 0]))
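A quick sanity check of the parser above against the three layouts its regex targets — RFC 1123, RFC 850 with a two-digit year, and C asctime(). This assumes the record's file is importable as http_date:

# Exercise parse_http_date on the three standard HTTP-date layouts.
import time
from http_date import parse_http_date  # the record's module, assumed on the path

samples = [
    'Sun, 06 Nov 1994 08:49:37 GMT',   # RFC 1123
    'Sunday, 06-Nov-94 08:49:37 GMT',  # RFC 850, two-digit year
    'Sun Nov  6 08:49:37 1994',        # C asctime()
]
for s in samples:
    print(time.strftime('%Y-%m-%d %H:%M:%S', parse_http_date(s)))

All three should print 1994-11-06 08:49:37; the two-digit RFC 850 year is resolved through the parser's 50-year sliding window against the current year.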
b9220e5942c09b657afe748aee67d6342c6f1ac1
tests/test_polymorphic.py
tests/test_polymorphic.py
""" Tests of polymorphic behaviours """ import odin from odin.resources import create_resource_from_dict class AbstractResource(odin.Resource): class Meta: abstract = True namespace = "au.com.example.abstracts" type_field = "type" class ResourceA(AbstractResource): class Meta: type_field = "type" class ResourceB(AbstractResource): pass def test_resolve_resource_using_full_type_field(): actual = create_resource_from_dict( {"type": "au.com.example.abstracts.ResourceA"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceA) def test_resolve_resource_using_partial_type_field(): actual = create_resource_from_dict( {"type": "ResourceB"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceB)
Add tests for polymorphic types
Add tests for polymorphic types
Python
bsd-3-clause
python-odin/odin
Add tests for polymorphic types
""" Tests of polymorphic behaviours """ import odin from odin.resources import create_resource_from_dict class AbstractResource(odin.Resource): class Meta: abstract = True namespace = "au.com.example.abstracts" type_field = "type" class ResourceA(AbstractResource): class Meta: type_field = "type" class ResourceB(AbstractResource): pass def test_resolve_resource_using_full_type_field(): actual = create_resource_from_dict( {"type": "au.com.example.abstracts.ResourceA"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceA) def test_resolve_resource_using_partial_type_field(): actual = create_resource_from_dict( {"type": "ResourceB"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceB)
<commit_before><commit_msg>Add tests for polymorphic types<commit_after>
""" Tests of polymorphic behaviours """ import odin from odin.resources import create_resource_from_dict class AbstractResource(odin.Resource): class Meta: abstract = True namespace = "au.com.example.abstracts" type_field = "type" class ResourceA(AbstractResource): class Meta: type_field = "type" class ResourceB(AbstractResource): pass def test_resolve_resource_using_full_type_field(): actual = create_resource_from_dict( {"type": "au.com.example.abstracts.ResourceA"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceA) def test_resolve_resource_using_partial_type_field(): actual = create_resource_from_dict( {"type": "ResourceB"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceB)
Add tests for polymorphic types""" Tests of polymorphic behaviours """ import odin from odin.resources import create_resource_from_dict class AbstractResource(odin.Resource): class Meta: abstract = True namespace = "au.com.example.abstracts" type_field = "type" class ResourceA(AbstractResource): class Meta: type_field = "type" class ResourceB(AbstractResource): pass def test_resolve_resource_using_full_type_field(): actual = create_resource_from_dict( {"type": "au.com.example.abstracts.ResourceA"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceA) def test_resolve_resource_using_partial_type_field(): actual = create_resource_from_dict( {"type": "ResourceB"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceB)
<commit_before><commit_msg>Add tests for polymorphic types<commit_after>""" Tests of polymorphic behaviours """ import odin from odin.resources import create_resource_from_dict class AbstractResource(odin.Resource): class Meta: abstract = True namespace = "au.com.example.abstracts" type_field = "type" class ResourceA(AbstractResource): class Meta: type_field = "type" class ResourceB(AbstractResource): pass def test_resolve_resource_using_full_type_field(): actual = create_resource_from_dict( {"type": "au.com.example.abstracts.ResourceA"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceA) def test_resolve_resource_using_partial_type_field(): actual = create_resource_from_dict( {"type": "ResourceB"}, AbstractResource, full_clean=False, ) assert isinstance(actual, ResourceB)
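Both tests above hinge on one behaviour: the value of the abstract resource's type_field selects the concrete class, whether it is the namespace-qualified name or just the bare class name. A library-free sketch of that registry lookup — illustrating the pattern the tests exercise, not odin's actual internals:

# Resolve a concrete class from a type field, registered under both its
# qualified 'namespace.ClassName' and its bare class name.
# Illustrative only -- not odin's implementation.
REGISTRY = {}

def register(namespace):
    def wrap(cls):
        REGISTRY['{}.{}'.format(namespace, cls.__name__)] = cls
        REGISTRY[cls.__name__] = cls
        return cls
    return wrap

@register('au.com.example.abstracts')
class ResourceA(object):
    pass

def create_from_dict(data, type_field='type'):
    return REGISTRY[data[type_field]]()  # KeyError signals an unknown type

assert isinstance(create_from_dict(
    {'type': 'au.com.example.abstracts.ResourceA'}), ResourceA)
assert isinstance(create_from_dict({'type': 'ResourceA'}), ResourceA)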
d9296356f9616a7712774e2349d6392f30e352cd
indico/MaKaC/plugins/EPayment/options.py
indico/MaKaC/plugins/EPayment/options.py
# -*- coding: utf-8 -*- ## ## ## This file is part of CDS Indico. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Indico; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. globalOptions = [ ("customCurrency", {"description": _("Add the currencies you want to manage"), "type": 'currency', "defaultValue": [{'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Euro', 'abbreviation': 'EUR'}, {'name': 'US dollar', 'abbreviation': 'USD'}], "editable": True, "visible": True}), ]
Add option file for EPayment administrator
[IMP] Add option file for EPayment administrator
Python
mit
mic4ael/indico,indico/indico,OmeGak/indico,ThiefMaster/indico,pferreir/indico,mvidalgarcia/indico,pferreir/indico,DirkHoffmann/indico,ThiefMaster/indico,OmeGak/indico,ThiefMaster/indico,DirkHoffmann/indico,indico/indico,DirkHoffmann/indico,indico/indico,OmeGak/indico,mic4ael/indico,mvidalgarcia/indico,mvidalgarcia/indico,mvidalgarcia/indico,mic4ael/indico,DirkHoffmann/indico,ThiefMaster/indico,pferreir/indico,OmeGak/indico,indico/indico,mic4ael/indico,pferreir/indico
[IMP] Add option file for EPayment administrator
# -*- coding: utf-8 -*- ## ## ## This file is part of CDS Indico. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Indico; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. globalOptions = [ ("customCurrency", {"description": _("Add the currencies you want to manage"), "type": 'currency', "defaultValue": [{'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Euro', 'abbreviation': 'EUR'}, {'name': 'US dollar', 'abbreviation': 'USD'}], "editable": True, "visible": True}), ]
<commit_before><commit_msg>[IMP] Add option file for EPayment administrator<commit_after>
# -*- coding: utf-8 -*- ## ## ## This file is part of CDS Indico. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Indico; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. globalOptions = [ ("customCurrency", {"description": _("Add the currencies you want to manage"), "type": 'currency', "defaultValue": [{'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Euro', 'abbreviation': 'EUR'}, {'name': 'US dollar', 'abbreviation': 'USD'}], "editable": True, "visible": True}), ]
[IMP] Add option file for EPayment administrator# -*- coding: utf-8 -*- ## ## ## This file is part of CDS Indico. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Indico; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. globalOptions = [ ("customCurrency", {"description": _("Add the currencies you want to manage"), "type": 'currency', "defaultValue": [{'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Euro', 'abbreviation': 'EUR'}, {'name': 'US dollar', 'abbreviation': 'USD'}], "editable": True, "visible": True}), ]
<commit_before><commit_msg>[IMP] Add option file for EPayment administrator<commit_after># -*- coding: utf-8 -*- ## ## ## This file is part of CDS Indico. ## Copyright (C) 2002, 2003, 2004, 2005, 2006, 2007 CERN. ## ## CDS Indico is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## CDS Indico is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with CDS Indico; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. globalOptions = [ ("customCurrency", {"description": _("Add the currencies you want to manage"), "type": 'currency', "defaultValue": [{'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Swiss Franc', 'abbreviation': 'CHF'}, {'name': 'Euro', 'abbreviation': 'EUR'}, {'name': 'US dollar', 'abbreviation': 'USD'}], "editable": True, "visible": True}), ]
a38f46566b18803d0b5ab0d75a267ee9ac3ceea3
doc/examples/viennagrid_wrapper/io.py
doc/examples/viennagrid_wrapper/io.py
#!/usr/bin/env python # # This example shows how to read and write mesh files using the low-level ViennaGrid # wrapper for Python (viennagrid.wrapper). from __future__ import print_function # In this example, we will set up a domain of triangles in the cartesian 3D # space from the contents of a Netgen mesh file. # # For that purpose, we need to define a domain and, eventually, also a segmentation # (in case we want to read segmentation data from the mesh file), and we need the # Netgen reader function, too. # # (Notice that the 'read_netgen' function and all other I/O functions # work with any type of domain and segmentation without name change.) from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation from viennagrid.wrapper import read_netgen # In case we want to read only the domain information from the mesh file, we would # just create an empty domain and call the Netgen reader on it with the file path # where the mesh file can be found. domain = Domain() read_netgen('../data/half-trigate.mesh', domain) # In case we want to read not only the domain information, but also the segmentation # information from the mesh file, we would have to create an empty domain and an # empty segmentation on that domain, and then call the Netgen reader. domain = Domain() segmentation = Segmentation(domain) read_netgen('../data/half-trigate.mesh', domain, segmentation)
Write an example of the use of the Netgen reader.
Write an example of the use of the Netgen reader.
Python
mit
jonancm/viennagrid-python,jonancm/viennagrid-python,jonancm/viennagrid-python
Write an example of the use of the Netgen reader.
#!/usr/bin/env python # # This example shows how to read and write mesh files using the low-level ViennaGrid # wrapper for Python (viennagrid.wrapper). from __future__ import print_function # In this example, we will set up a domain of triangles in the cartesian 3D # space from the contents of a Netgen mesh file. # # For that purpose, we need to define a domain and, eventually, also a segmentation # (in case we want to read segmentation data from the mesh file), and we need the # Netgen reader function, too. # # (Notice that the 'read_netgen' function and all other I/O functions # work with any type of domain and segmentation without name change.) from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation from viennagrid.wrapper import read_netgen # In case we want to read only the domain information from the mesh file, we would # just create an empty domain and call the Netgen reader on it with the file path # where the mesh file can be found. domain = Domain() read_netgen('../data/half-trigate.mesh', domain) # In case we want to read not only the domain information, but also the segmentation # information from the mesh file, we would have to create an empty domain and an # empty segmentation on that domain, and then call the Netgen reader. domain = Domain() segmentation = Segmentation(domain) read_netgen('../data/half-trigate.mesh', domain, segmentation)
<commit_before><commit_msg>Write an example of the use of the Netgen reader.<commit_after>
#!/usr/bin/env python # # This example shows how to read and write mesh files using the low-level ViennaGrid # wrapper for Python (viennagrid.wrapper). from __future__ import print_function # In this example, we will set up a domain of triangles in the cartesian 3D # space from the contents of a Netgen mesh file. # # For that purpose, we need to define a domain and, eventually, also a segmentation # (in case we want to read segmentation data from the mesh file), and we need the # Netgen reader function, too. # # (Notice that the 'read_netgen' function and all other I/O functions # work with any type of domain and segmentation without name change.) from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation from viennagrid.wrapper import read_netgen # In case we want to read only the domain information from the mesh file, we would # just create an empty domain and call the Netgen reader on it with the file path # where the mesh file can be found. domain = Domain() read_netgen('../data/half-trigate.mesh', domain) # In case we want to read not only the domain information, but also the segmentation # information from the mesh file, we would have to create an empty domain and an # empty segmentation on that domain, and then call the Netgen reader. domain = Domain() segmentation = Segmentation(domain) read_netgen('../data/half-trigate.mesh', domain, segmentation)
Write an example of the use of the Netgen reader.#!/usr/bin/env python # # This example shows how to read and write mesh files using the low-level ViennaGrid # wrapper for Python (viennagrid.wrapper). from __future__ import print_function # In this example, we will set up a domain of triangles in the cartesian 3D # space from the contents of a Netgen mesh file. # # For that purpose, we need to define a domain and, eventually, also a segmentation # (in case we want to read segmentation data from the mesh file), and we need the # Netgen reader function, too. # # (Notice that the 'read_netgen' function and all other I/O functions # work with any type of domain and segmentation without name change.) from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation from viennagrid.wrapper import read_netgen # In case we want to read only the domain information from the mesh file, we would # just create an empty domain and call the Netgen reader on it with the file path # where the mesh file can be found. domain = Domain() read_netgen('../data/half-trigate.mesh', domain) # In case we want to read not only the domain information, but also the segmentation # information from the mesh file, we would have to create an empty domain and an # empty segmentation on that domain, and then call the Netgen reader. domain = Domain() segmentation = Segmentation(domain) read_netgen('../data/half-trigate.mesh', domain, segmentation)
<commit_before><commit_msg>Write an example of the use of the Netgen reader.<commit_after>#!/usr/bin/env python # # This example shows how to read and write mesh files using the low-level ViennaGrid # wrapper for Python (viennagrid.wrapper). from __future__ import print_function # In this example, we will set up a domain of triangles in the cartesian 3D # space from the contents of a Netgen mesh file. # # For that purpose, we need to define a domain and, eventually, also a segmentation # (in case we want to read segmentation data from the mesh file), and we need the # Netgen reader function, too. # # (Notice that the 'read_netgen' function and all other I/O functions # work with any type of domain and segmentation without name change.) from viennagrid.wrapper import TriangularCartesian3D_Domain as Domain from viennagrid.wrapper import TriangularCartesian3D_Segmentation as Segmentation from viennagrid.wrapper import read_netgen # In case we want to read only the domain information from the mesh file, we would # just create an empty domain and call the Netgen reader on it with the file path # where the mesh file can be found. domain = Domain() read_netgen('../data/half-trigate.mesh', domain) # In case we want to read not only the domain information, but also the segmentation # information from the mesh file, we would have to create an empty domain and an # empty segmentation on that domain, and then call the Netgen reader. domain = Domain() segmentation = Segmentation(domain) read_netgen('../data/half-trigate.mesh', domain, segmentation)
57bee62b1bd6c42469f420e036937ea3c9ca56b7
wikilink/db/connection.py
wikilink/db/connection.py
from sqlalchemy import create_engine from sqlalchemy_utils import functions from sqlalchemy.orm import sessionmaker class Connection: def __init__(self, db, name, password, ip, port): if db == "postgresql": connection = "postgresql+psycopg2://" + name + ":" + password + "@" + ip + ":" + port elif db == "mysql": connection = "mysql+pymysql://" + name + ":" + password + "@" + ip + ":" + port db_name = 'wikilink' # Turn off echo engine = create_engine(connection + "/" + db_name + '?charset=utf8', echo=False, encoding='utf-8') if not functions.database_exists(engine.url): functions.create_database(engine.url) self.session = sessionmaker(bind=engine)() # If table don't exist, Create. if (not engine.dialect.has_table(engine, 'link') and not engine.dialect.has_table(engine, 'page')): Base.metadata.create_all(engine)
Change class name to Connection
Change class name to Connection Signed-off-by: Tran Ly Vu <0555cc0f3d5a46ac8c0e84ddf31443494c66bd55@gmail.com>
Python
apache-2.0
tranlyvu/find-link,tranlyvu/findLink
Change class name to Connection Signed-off-by: Tran Ly Vu <0555cc0f3d5a46ac8c0e84ddf31443494c66bd55@gmail.com>
from sqlalchemy import create_engine from sqlalchemy_utils import functions from sqlalchemy.orm import sessionmaker class Connection: def __init__(self, db, name, password, ip, port): if db == "postgresql": connection = "postgresql+psycopg2://" + name + ":" + password + "@" + ip + ":" + port elif db == "mysql": connection = "mysql+pymysql://" + name + ":" + password + "@" + ip + ":" + port db_name = 'wikilink' # Turn off echo engine = create_engine(connection + "/" + db_name + '?charset=utf8', echo=False, encoding='utf-8') if not functions.database_exists(engine.url): functions.create_database(engine.url) self.session = sessionmaker(bind=engine)() # If table don't exist, Create. if (not engine.dialect.has_table(engine, 'link') and not engine.dialect.has_table(engine, 'page')): Base.metadata.create_all(engine)
<commit_before><commit_msg>Change class name to Connection Signed-off-by: Tran Ly Vu <0555cc0f3d5a46ac8c0e84ddf31443494c66bd55@gmail.com><commit_after>
from sqlalchemy import create_engine from sqlalchemy_utils import functions from sqlalchemy.orm import sessionmaker class Connection: def __init__(self, db, name, password, ip, port): if db == "postgresql": connection = "postgresql+psycopg2://" + name + ":" + password + "@" + ip + ":" + port elif db == "mysql": connection = "mysql+pymysql://" + name + ":" + password + "@" + ip + ":" + port db_name = 'wikilink' # Turn off echo engine = create_engine(connection + "/" + db_name + '?charset=utf8', echo=False, encoding='utf-8') if not functions.database_exists(engine.url): functions.create_database(engine.url) self.session = sessionmaker(bind=engine)() # If table don't exist, Create. if (not engine.dialect.has_table(engine, 'link') and not engine.dialect.has_table(engine, 'page')): Base.metadata.create_all(engine)
Change class name to Connection Signed-off-by: Tran Ly Vu <0555cc0f3d5a46ac8c0e84ddf31443494c66bd55@gmail.com>from sqlalchemy import create_engine from sqlalchemy_utils import functions from sqlalchemy.orm import sessionmaker class Connection: def __init__(self, db, name, password, ip, port): if db == "postgresql": connection = "postgresql+psycopg2://" + name + ":" + password + "@" + ip + ":" + port elif db == "mysql": connection = "mysql+pymysql://" + name + ":" + password + "@" + ip + ":" + port db_name = 'wikilink' # Turn off echo engine = create_engine(connection + "/" + db_name + '?charset=utf8', echo=False, encoding='utf-8') if not functions.database_exists(engine.url): functions.create_database(engine.url) self.session = sessionmaker(bind=engine)() # If table don't exist, Create. if (not engine.dialect.has_table(engine, 'link') and not engine.dialect.has_table(engine, 'page')): Base.metadata.create_all(engine)
<commit_before><commit_msg>Change class name to Connection Signed-off-by: Tran Ly Vu <0555cc0f3d5a46ac8c0e84ddf31443494c66bd55@gmail.com><commit_after>from sqlalchemy import create_engine from sqlalchemy_utils import functions from sqlalchemy.orm import sessionmaker class Connection: def __init__(self, db, name, password, ip, port): if db == "postgresql": connection = "postgresql+psycopg2://" + name + ":" + password + "@" + ip + ":" + port elif db == "mysql": connection = "mysql+pymysql://" + name + ":" + password + "@" + ip + ":" + port db_name = 'wikilink' # Turn off echo engine = create_engine(connection + "/" + db_name + '?charset=utf8', echo=False, encoding='utf-8') if not functions.database_exists(engine.url): functions.create_database(engine.url) self.session = sessionmaker(bind=engine)() # If table don't exist, Create. if (not engine.dialect.has_table(engine, 'link') and not engine.dialect.has_table(engine, 'page')): Base.metadata.create_all(engine)
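One caveat in the record above: the final branch calls Base.metadata.create_all(engine), but Base is never imported in this file, so the create-tables path would raise NameError as written — presumably the project's declarative base is meant to come from a sibling module. A self-contained sketch of the same create-only-if-missing flow with the base declared in place (SQLite and an illustrative Page model, so it runs without a database server; the has_table idiom mirrors the record and assumes SQLAlchemy 1.x):

# Same flow as the record -- create schema only when absent -- but with the
# declarative Base defined locally so the snippet is self-contained.
from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()


class Page(Base):
    __tablename__ = 'page'
    id = Column(Integer, primary_key=True)
    title = Column(String(256))  # illustrative column, not wikilink's schema


engine = create_engine('sqlite:///wikilink_sketch.db', echo=False)
if not engine.dialect.has_table(engine, 'page'):  # 1.x idiom, as in the record
    Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()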
ec99e4fea77074bfe525bb2aeecbe66be231cc7d
integration-tests/multiple_clients_test.py
integration-tests/multiple_clients_test.py
#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket test_url ='http://localhost:3000/so/cool?auth=mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO' def validate_data(connections, expected_result): for connection in connections: print 'receiving {}'.format(connection) result = connection.recv() parsed_result = json.loads(result) assert parsed_result == expected_result, 'Input data: {} is different from output data: {}'.format( expected_result, parsed_result) print 'All 10 clients received {} successfully!'.format(expected_result) def main(): # Make sure the value is null initially requests.put(test_url, json.dumps(None)) connections = [] for i in range(10): connection = websocket.create_connection("ws://localhost:3000/so/cool") connections.append(connection) validate_data(connections, None) # Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put(test_url, json.dumps(new_data)) # Make sure all clients get the new data validate_data(connections, new_data) # Reset the value to make the test idempotent requests.put(test_url, json.dumps(None)) if __name__ == '__main__': main()
Add a multiple client integration test
Add a multiple client integration test It appears to have found an issue right away: Clients are not disconnected even if the process spawning them has exited.
Python
bsd-3-clause
channable/icepeak,channable/icepeak,channable/icepeak
Add a multiple client integration test It appears to have found an issue right away: Clients are not disconnected even if the process spawning them has exited.
#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket test_url ='http://localhost:3000/so/cool?auth=mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO' def validate_data(connections, expected_result): for connection in connections: print 'receiving {}'.format(connection) result = connection.recv() parsed_result = json.loads(result) assert parsed_result == expected_result, 'Input data: {} is different from output data: {}'.format( expected_result, parsed_result) print 'All 10 clients received {} successfully!'.format(expected_result) def main(): # Make sure the value is null initially requests.put(test_url, json.dumps(None)) connections = [] for i in range(10): connection = websocket.create_connection("ws://localhost:3000/so/cool") connections.append(connection) validate_data(connections, None) # Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put(test_url, json.dumps(new_data)) # Make sure all clients get the new data validate_data(connections, new_data) # Reset the value to make the test idempotent requests.put(test_url, json.dumps(None)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add a multiple client integration test It appears to have found an issue right away: Clients are not disconnected even if the process spawning them has exited.<commit_after>
#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket test_url ='http://localhost:3000/so/cool?auth=mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO' def validate_data(connections, expected_result): for connection in connections: print 'receiving {}'.format(connection) result = connection.recv() parsed_result = json.loads(result) assert parsed_result == expected_result, 'Input data: {} is different from output data: {}'.format( expected_result, parsed_result) print 'All 10 clients received {} successfully!'.format(expected_result) def main(): # Make sure the value is null initially requests.put(test_url, json.dumps(None)) connections = [] for i in range(10): connection = websocket.create_connection("ws://localhost:3000/so/cool") connections.append(connection) validate_data(connections, None) # Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put(test_url, json.dumps(new_data)) # Make sure all clients get the new data validate_data(connections, new_data) # Reset the value to make the test idempotent requests.put(test_url, json.dumps(None)) if __name__ == '__main__': main()
Add a multiple client integration test It appears to have found an issue right away: Clients are not disconnected even if the process spawning them has exited.#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket test_url ='http://localhost:3000/so/cool?auth=mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO' def validate_data(connections, expected_result): for connection in connections: print 'receiving {}'.format(connection) result = connection.recv() parsed_result = json.loads(result) assert parsed_result == expected_result, 'Input data: {} is different from output data: {}'.format( expected_result, parsed_result) print 'All 10 clients received {} successfully!'.format(expected_result) def main(): # Make sure the value is null initially requests.put(test_url, json.dumps(None)) connections = [] for i in range(10): connection = websocket.create_connection("ws://localhost:3000/so/cool") connections.append(connection) validate_data(connections, None) # Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put(test_url, json.dumps(new_data)) # Make sure all clients get the new data validate_data(connections, new_data) # Reset the value to make the test idempotent requests.put(test_url, json.dumps(None)) if __name__ == '__main__': main()
<commit_before><commit_msg>Add a multiple client integration test It appears to have found an issue right away: Clients are not disconnected even if the process spawning them has exited.<commit_after>#!/usr/bin/env python2.7 from __future__ import absolute_import, division, unicode_literals import json import requests import websocket test_url ='http://localhost:3000/so/cool?auth=mS7karSP9QbD2FFdgBk2QmuTna7fJyp7ll0Vg8gnffIBHKILSrusMslucBzMhwO' def validate_data(connections, expected_result): for connection in connections: print 'receiving {}'.format(connection) result = connection.recv() parsed_result = json.loads(result) assert parsed_result == expected_result, 'Input data: {} is different from output data: {}'.format( expected_result, parsed_result) print 'All 10 clients received {} successfully!'.format(expected_result) def main(): # Make sure the value is null initially requests.put(test_url, json.dumps(None)) connections = [] for i in range(10): connection = websocket.create_connection("ws://localhost:3000/so/cool") connections.append(connection) validate_data(connections, None) # Put some data into icepeak over HTTP new_data = {'status': 'freezing'} requests.put(test_url, json.dumps(new_data)) # Make sure all clients get the new data validate_data(connections, new_data) # Reset the value to make the test idempotent requests.put(test_url, json.dumps(None)) if __name__ == '__main__': main()
c6c92b1d073151d0c7173d9718e79465c3ce5803
midterm/problem4.py
midterm/problem4.py
# Problem 4 # 10.0 points possible (graded) # Implement a function called closest_power that meets the specifications below. # For example, # closest_power(3,12) returns 2 # closest_power(4,12) returns 2 # closest_power(4,1) returns 0 def closest_power(base, num): ''' base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. ''' num = int(num) difference = base**num integerExponent = num exponents = range(0, num) for exponent in exponents: exponentValue = base**exponent if abs(exponentValue - num) < difference: difference = abs(exponentValue - num) integerExponent = exponent else: break return integerExponent print(closest_power(10, 550.0))
Implement a function called closest_power
Implement a function called closest_power
Python
mit
Kunal57/MIT_6.00.1x
Implement a function called closest_power
# Problem 4 # 10.0 points possible (graded) # Implement a function called closest_power that meets the specifications below. # For example, # closest_power(3,12) returns 2 # closest_power(4,12) returns 2 # closest_power(4,1) returns 0 def closest_power(base, num): ''' base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. ''' num = int(num) difference = base**num integerExponent = num exponents = range(0, num) for exponent in exponents: exponentValue = base**exponent if abs(exponentValue - num) < difference: difference = abs(exponentValue - num) integerExponent = exponent else: break return integerExponent print(closest_power(10, 550.0))
<commit_before><commit_msg>Implement a function called closest_power<commit_after>
# Problem 4 # 10.0 points possible (graded) # Implement a function called closest_power that meets the specifications below. # For example, # closest_power(3,12) returns 2 # closest_power(4,12) returns 2 # closest_power(4,1) returns 0 def closest_power(base, num): ''' base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. ''' num = int(num) difference = base**num integerExponent = num exponents = range(0, num) for exponent in exponents: exponentValue = base**exponent if abs(exponentValue - num) < difference: difference = abs(exponentValue - num) integerExponent = exponent else: break return integerExponent print(closest_power(10, 550.0))
Implement a function called closest_power# Problem 4 # 10.0 points possible (graded) # Implement a function called closest_power that meets the specifications below. # For example, # closest_power(3,12) returns 2 # closest_power(4,12) returns 2 # closest_power(4,1) returns 0 def closest_power(base, num): ''' base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. ''' num = int(num) difference = base**num integerExponent = num exponents = range(0, num) for exponent in exponents: exponentValue = base**exponent if abs(exponentValue - num) < difference: difference = abs(exponentValue - num) integerExponent = exponent else: break return integerExponent print(closest_power(10, 550.0))
<commit_before><commit_msg>Implement a function called closest_power<commit_after># Problem 4 # 10.0 points possible (graded) # Implement a function called closest_power that meets the specifications below. # For example, # closest_power(3,12) returns 2 # closest_power(4,12) returns 2 # closest_power(4,1) returns 0 def closest_power(base, num): ''' base: base of the exponential, integer > 1 num: number you want to be closest to, integer > 0 Find the integer exponent such that base**exponent is closest to num. Note that the base**exponent may be either greater or smaller than num. In case of a tie, return the smaller value. Returns the exponent. ''' num = int(num) difference = base**num integerExponent = num exponents = range(0, num) for exponent in exponents: exponentValue = base**exponent if abs(exponentValue - num) < difference: difference = abs(exponentValue - num) integerExponent = exponent else: break return integerExponent print(closest_power(10, 550.0))
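The docstring above fixes two subtleties worth testing: base**exponent may overshoot num, and a tie goes to the smaller exponent. A compact cross-check built on the rounded logarithm, assuming the same spec — candidate exponents around round(log_base(num)) are compared directly, with the exponent itself as the tie-breaker:

# Logarithm-based cross-check of the closest_power spec; ties resolve to
# the smaller exponent because it is second in the sort key.
import math

def closest_power_ref(base, num):
    e = max(int(round(math.log(num, base))), 0)
    candidates = range(max(e - 2, 0), e + 3)
    return min(candidates, key=lambda k: (abs(base ** k - num), k))

assert closest_power_ref(3, 12) == 2
assert closest_power_ref(4, 12) == 2
assert closest_power_ref(4, 1) == 0
assert closest_power_ref(2, 3) == 1          # tie: 2**1 vs 2**2 -> smaller
assert closest_power_ref(10, 550.0) == 2     # matches the record's example call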
840c34f9428cc4b7db6dbc610a72e3ed5957e04c
neutron_vpnaas/cmd/eventlet/vyatta_agent.py
neutron_vpnaas/cmd/eventlet/vyatta_agent.py
# Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_vpnaas.services.vpn import vyatta_agent def main(): vyatta_agent.main()
Introduce Vyatta VPN agent cmd in monkey patched eventlet module
Introduce Vyatta VPN agent cmd in monkey patched eventlet module Commit [1] introduced an entry for reference vpn agent in cmd/eventlet module so that they get monkey patched in a uniform way. This commit introduces the same for vyatta vpn agent. [1] I2d7081dbd4cb532332e3b66667bb8c71aa5a6658 Change-Id: Icc3c8f040bd4fbb2ad36e3d32a381a7ba7993d53 Closes-Bug: 1434337
Python
apache-2.0
openstack/neutron-vpnaas,openstack/neutron-vpnaas
Introduce Vyatta VPN agent cmd in monkey patched eventlet module Commit [1] introduced an entry for reference vpn agent in cmd/eventlet module so that they get monkey patched in a uniform way. This commit introduces the same for vyatta vpn agent. [1] I2d7081dbd4cb532332e3b66667bb8c71aa5a6658 Change-Id: Icc3c8f040bd4fbb2ad36e3d32a381a7ba7993d53 Closes-Bug: 1434337
# Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_vpnaas.services.vpn import vyatta_agent def main(): vyatta_agent.main()
<commit_before><commit_msg>Introduce Vyatta VPN agent cmd in monkey patched eventlet module Commit [1] introduced an entry for reference vpn agent in cmd/eventlet module so that they get monkey patched in an uniform way. This commit introduces the same for vyatta vpn agent. [1] I2d7081dbd4cb532332e3b66667bb8c71aa5a6658 Change-Id: Icc3c8f040bd4fbb2ad36e3d32a381a7ba7993d53 Closes-Bug: 1434337<commit_after>
# Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_vpnaas.services.vpn import vyatta_agent def main(): vyatta_agent.main()
Introduce Vyatta VPN agent cmd in monkey patched eventlet module Commit [1] introduced an entry for reference vpn agent in cmd/eventlet module so that they get monkey patched in an uniform way. This commit introduces the same for vyatta vpn agent. [1] I2d7081dbd4cb532332e3b66667bb8c71aa5a6658 Change-Id: Icc3c8f040bd4fbb2ad36e3d32a381a7ba7993d53 Closes-Bug: 1434337# Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_vpnaas.services.vpn import vyatta_agent def main(): vyatta_agent.main()
<commit_before><commit_msg>Introduce Vyatta VPN agent cmd in monkey patched eventlet module Commit [1] introduced an entry for reference vpn agent in cmd/eventlet module so that they get monkey patched in an uniform way. This commit introduces the same for vyatta vpn agent. [1] I2d7081dbd4cb532332e3b66667bb8c71aa5a6658 Change-Id: Icc3c8f040bd4fbb2ad36e3d32a381a7ba7993d53 Closes-Bug: 1434337<commit_after># Copyright 2015 Brocade Communications System, Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # from neutron_vpnaas.services.vpn import vyatta_agent def main(): vyatta_agent.main()
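The cmd/eventlet layout the message describes exists so that monkey patching happens in one place, on import, before any agent code runs. A minimal sketch of that general pattern, not the actual neutron-vpnaas source (the file path is illustrative):

# cmd/eventlet/__init__.py  (illustrative path)
import eventlet

# Patch the standard library (sockets, threads, time, ...) exactly once,
# before any submodule imports code that would cache unpatched versions.
eventlet.monkey_patch()

Entry points such as the vyatta_agent module above then live under this package, so importing them triggers the patching automatically.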
208dcdf09afc0d625439f6c20b5272a06b86c87f
lms/djangoapps/edraak_certificates/tests/test_webview_changes.py
lms/djangoapps/edraak_certificates/tests/test_webview_changes.py
"""Tests for changes on certificates by Edraak""" from django.test.utils import override_settings from lms.djangoapps.certificates.api import get_certificate_url from lms.djangoapps.certificates.models import CertificateTemplate from lms.djangoapps.certificates.tests.test_webview_views import CommonCertificatesTestCase from openedx.core.djangoapps.models.course_details import CourseDetails class TestEdraakCertificate(CommonCertificatesTestCase): @staticmethod def _create_edraak_test_template(): """ Creates a custom certificate template that is used for Edraak changes """ template_html = """ <%namespace name='static' file='static_content.html'/> <html> <body> <b>Edraak Template</b> course_description: ${course_description} </body> </html> """ template = CertificateTemplate( name='custom template', template=template_html, organization_id=None, course_key=None, mode='honor', is_active=True, language=None ) template.save() @override_settings(FEATURES={ "CUSTOM_CERTIFICATE_TEMPLATES_ENABLED": True, "CERTIFICATES_HTML_VIEW": True }) def test_changes_on_webview(self): # Prepare attributes to check for CourseDetails.update_about_item(self.course, 'short_description', 'Edraak Test Description', self.user.id) # Creating a certificate self._add_course_certificates(count=1, signatory_count=2) self._create_edraak_test_template() test_url = get_certificate_url( user_id=self.user.id, course_id=unicode(self.course.id) ) # Getting certificate as HTML response = self.client.get(test_url) # Verifying contents self.assertContains(response, 'Edraak Template') self.assertContains(response, 'course_description: Edraak Test Description')
Add Tests for Changes of Certificate
Add Tests for Changes of Certificate
Python
agpl-3.0
Edraak/edraak-platform,Edraak/edraak-platform,Edraak/edraak-platform,Edraak/edraak-platform
0a70e5cd789131ae67a4cdbd25ea0fc1b140bfe2
tests/general_test.py
tests/general_test.py
from tests.integration import _IntegrationTest

from bioagents import Bioagent, BioagentException
from kqml import KQMLList, KQMLPerformative


class TestErrorHandling(_IntegrationTest):
    reason = 'Found it!'

    def __init__(self, *args):
        class FindMe(BioagentException):
            pass

        class TestAgent(Bioagent):
            name = 'test'
            tasks = ['TEST']

            def receive_request(self, msg, content):
                ret = None
                try:
                    ret = Bioagent.receive_request(self, msg, content)
                except FindMe:
                    reply_content = self.make_failure(TestErrorHandling.reason)
                    if ret is None:
                        ret = self.reply_with_content(msg, reply_content)
                return ret

            def respond_test(self, content):
                raise FindMe()

        super(TestErrorHandling, self).__init__(TestAgent)

    def get_message(self):
        content = KQMLList('TEST')
        content.sets('description', '')
        msg = KQMLPerformative('REQUEST')
        msg.set('content', content)
        return msg, content

    def is_correct_response(self):
        head = self.output.head()
        assert head == "FAILURE",\
            "Got wrong output head: %s instead of FAILURE." % head
        assert self.output.get('reason') == self.reason,\
            "Exception caught too soon."
        return True
Add test for exception handling problem.
Add test for exception handling problem.
Python
bsd-2-clause
bgyori/bioagents,sorgerlab/bioagents
007c2be42ae5a5cb16b04a7f0fa0dffdb0649068
CodeFights/arrayPacking.py
CodeFights/arrayPacking.py
#!/usr/local/bin/python
# Code Fights Array Packing Problem


def arrayPacking(a):
    pass


def main():
    tests = [
        [],
        []
    ]

    for t in tests:
        res = arrayPacking(t[0])
        if t[1] == res:
            print("PASSED: arrayPacking({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: arrayPacking({}) returned {},"
                   " answer: {}").format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
Add file for Code Fights array packing problem
Add file for Code Fights array packing problem
Python
mit
HKuz/Test_Code
b9dcf30e199b19d5115f4ea7602ef10a8bfa966c
asyncio_irc/tests/test_filters.py
asyncio_irc/tests/test_filters.py
from unittest import mock, TestCase

from .. import filters
from ..message import ReceivedMessage


class TestCommandBlacklist(TestCase):
    def setUp(self):
        self.connection = object()
        self.handler = mock.Mock()

    def test_correct(self):
        message = ReceivedMessage(b'COMMAND\r\n')
        wrapped = filters.command_blacklist([b'WRONG_COMMAND'])(self.handler)
        wrapped(self.connection, message)
        self.handler.assert_called_once_with(
            connection=self.connection,
            message=message,
        )

    def test_incorrect(self):
        message = ReceivedMessage(b'WRONG_COMMAND\r\n')
        wrapped = filters.command_blacklist([b'WRONG_COMMAND'])(self.handler)
        wrapped(self.connection, message)
        self.assertFalse(self.handler.called)


class TestCommandOnly(TestCase):
    def setUp(self):
        self.connection = object()
        self.handler = mock.Mock()

    def test_correct(self):
        message = ReceivedMessage(b'COMMAND\r\n')
        wrapped = filters.command_only(b'COMMAND')(self.handler)
        wrapped(self.connection, message)
        self.handler.assert_called_once_with(
            connection=self.connection,
            message=message,
        )

    def test_incorrect(self):
        message = ReceivedMessage(b'WRONG_COMMAND\r\n')
        wrapped = filters.command_only(b'COMMAND')(self.handler)
        wrapped(self.connection, message)
        self.assertFalse(self.handler.called)


class TestCommandWhitelist(TestCase):
    def setUp(self):
        self.connection = object()
        self.handler = mock.Mock()

    def test_correct(self):
        message = ReceivedMessage(b'COMMAND\r\n')
        wrapped = filters.command_whitelist([b'COMMAND'])(self.handler)
        wrapped(self.connection, message)
        self.handler.assert_called_once_with(
            connection=self.connection,
            message=message,
        )

    def test_incorrect(self):
        message = ReceivedMessage(b'WRONG_COMMAND\r\n')
        wrapped = filters.command_whitelist([b'COMMAND'])(self.handler)
        wrapped(self.connection, message)
        self.assertFalse(self.handler.called)
Test the new filtering decorators
Test the new filtering decorators
Python
bsd-2-clause
meshy/framewirc
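The tests above fix the decorators' contract: each factory takes the command or command list, returns a decorator, and the wrapped handler fires with keyword arguments only when the message's command passes the check. A sketch consistent with that contract, assuming ReceivedMessage exposes a command attribute (an assumption; the real framewirc implementation may differ):

import functools

def command_whitelist(commands):
    def decorator(handler):
        @functools.wraps(handler)
        def wrapped(connection, message):
            # Silently drop any message whose command is not allowed.
            if message.command in commands:
                return handler(connection=connection, message=message)
        return wrapped
    return decorator

command_only and command_blacklist would differ only in the membership test.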
f29dd515a853a18d5680e4f6e1fa9608c8c26076
research_pyutils/tests/tests_auxiliary.py
research_pyutils/tests/tests_auxiliary.py
import numpy as np


def test_array_reshape_to_dims():
    from research_pyutils import array_reshape_to_dims
    a = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
    sh = a.shape
    tot = sh[0] * sh[1]
    # # test 1 for a conversion, vectorize.
    b = array_reshape_to_dims(a, k=1)
    assert b.shape[0] == tot
    assert b[0] == a[0, 0] and b[-1] == a[-1, -1]
    assert np.all(b.reshape(sh) == a)
    # # test 2: convert to particular shape.
    b = array_reshape_to_dims(b, sh[1], sh[0], k=2)
    assert b.shape[0] == sh[1] and b.shape[1] == sh[0]
    assert np.all(b.flatten().reshape(sh) == a)
    assert len(b.shape) == 2
    # # test 3: convert to particular shape.
    b = array_reshape_to_dims(b, sh[1], sh[0], k=3)
    assert b.shape[0] == sh[1] and b.shape[1] == sh[0]
    assert np.all(b.flatten().reshape(sh) == a)
    assert len(b.shape) == 3
    # # test 4: convert to particular shape.
    b = array_reshape_to_dims(b, 1, tot)
    assert b.shape[1] == tot and b.shape[0] == 1
    assert np.all(b.flatten().reshape(sh) == a)
    assert len(b.shape) == 2
    # # reshape the original matrix a.
    a = a.reshape((2, 2, 2))
    sh = a.shape
    # # test 5: convert to particular shape.
    b = array_reshape_to_dims(b, 1, tot)
    assert b.shape[1] == tot and b.shape[0] == 1
    assert np.all(b.flatten().reshape(sh) == a)
    assert len(b.shape) == 2
    # # test 6 for a conversion, vectorize.
    b = array_reshape_to_dims(a, k=1)
    assert b.shape[0] == tot
    assert b[0] == a[0, 0, 0] and b[-1] == a[-1, -1, -1]
    assert np.all(b.reshape(sh) == a)
Add the tests for auxiliary.py modules.
Add the tests for auxiliary.py modules.
Python
apache-2.0
grigorisg9gr/pyutils,grigorisg9gr/pyutils
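Taken together, the assertions pin down array_reshape_to_dims fairly tightly: with no target dims it flattens, and with target dims it reshapes, padding with trailing singleton axes up to k dimensions. A hypothetical reconstruction implied by the tests (the real research_pyutils implementation may differ):

import numpy as np

def array_reshape_to_dims(a, *dims, k=1):
    # No dims given: vectorize (tests 1 and 6).
    if not dims:
        return a.reshape(-1)
    # Pad with trailing 1s up to k axes, e.g. (2, 4) with k=3 -> (2, 4, 1);
    # when k <= len(dims) the padding term is empty and dims are used as-is.
    shape = tuple(dims) + (1,) * (k - len(dims))
    return a.reshape(shape)

This sketch satisfies every assertion above, including test 4, where k defaults to 1 and the padding term is empty.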
d454b9aead39db2c6836d6d0e0565decd92e85b7
bin/debug/load_timeline_for_day_and_user.py
bin/debug/load_timeline_for_day_and_user.py
import json
import bson.json_util as bju
import emission.core.get_database as edb
import sys

if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: %s <filename>" % (sys.argv[0])

    fn = sys.argv[1]
    print "Loading file " + fn

    entries = json.load(open(fn), object_hook = bju.object_hook)
    for entry in entries:
        edb.get_timeseries_db().save(entry)
Load the timeseries data for a particular day/user combination
Load the timeseries data for a particular day/user combination

So that we can perform analysis on it
Python
bsd-3-clause
sunil07t/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,sunil07t/e-mission-server,shankari/e-mission-server,sunil07t/e-mission-server,e-mission/e-mission-server,joshzarrabi/e-mission-server,yw374cornell/e-mission-server,shankari/e-mission-server,e-mission/e-mission-server,yw374cornell/e-mission-server,joshzarrabi/e-mission-server,joshzarrabi/e-mission-server
cdf107ddeb9ca4f11ca5e5af59e196e752e79841
amzn.py
amzn.py
avsRequestHeader = {
    "messageHeader": {
        "deviceContext": [
            {
                "name": "playbackState",
                "namespace": "AudioPlayer",
                "payload": {
                    "streamId": "",
                    "offsetInMilliseconds": "0",
                    "playerActivity": "IDLE"
                }
            }
        ]
    },
    "messageBody": {
        "profile": "alexa-close-talk",
        "locale": "en-us",
        "format": "audio/L16; rate=16000; channels=1"
    }
}
Put AVS Request Header into separate module
Put AVS Request Header into separate module

Signed-off-by: Mike Erdahl <e22fb0dee4cdb72a0a7b2cad0d8115432499e3b7@ti.com>
Python
mit
merdahl/AlexaBeagleBone2
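A header like this is typically serialized with json.dumps into the JSON part of the multipart request that AVS v1 expected alongside recorded audio. A hedged usage sketch; the endpoint URL, part names, audio filename, and use of the requests library are all assumptions, and token is a placeholder:

import json
import requests

from amzn import avsRequestHeader

url = 'https://access-alexa-na.amazon.com/v1/avs/speechrecognizer/recognize'
token = 'YOUR_OAUTH_BEARER_TOKEN'

files = {
    'request': ('request', json.dumps(avsRequestHeader),
                'application/json; charset=UTF-8'),
    'audio': ('audio', open('speech.wav', 'rb'),
              'audio/L16; rate=16000; channels=1'),
}
response = requests.post(
    url, headers={'Authorization': 'Bearer ' + token}, files=files)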
72b0502e3d1c9b2e95b43a2d5c182544a5acdbd3
kala.py
kala.py
#!/usr/bin/python

import json

import bottle

from bottle_mongo import MongoPlugin


app = bottle.Bottle()
app.config.load_config('settings.ini')

bottle.install(MongoPlugin(
    uri=app.config['mongodb.uri'],
    db=app.config['mongodb.db'],
    json_mongo=True))


def _get_json(name):
    result = bottle.request.query.get(name)
    return json.loads(result) if result else None


@app.route('/<collection>')
def get(mongodb, collection):
    filter_ = _get_json('filter')
    projection = _get_json('projection')
    skip = int(bottle.request.query.get('skip', 0))
    limit = int(bottle.request.query.get('limit', 100))
    sort = _get_json('sort')

    cursor = mongodb[collection].find(
        filter=filter_, projection=projection, skip=skip, limit=limit,
        sort=sort
    )

    return {'results': [document for document in cursor]}


if __name__ == '__main__':
    app.run()
#!/usr/bin/python

import json

import bottle

from bottle_mongo import MongoPlugin


app = bottle.Bottle()
app.config.load_config('settings.ini')

app.install(MongoPlugin(
    uri=app.config['mongodb.uri'],
    db=app.config['mongodb.db'],
    json_mongo=True))


def _get_json(name):
    result = bottle.request.query.get(name)
    return json.loads(result) if result else None


@app.route('/<collection>')
def get(mongodb, collection):
    filter_ = _get_json('filter')
    projection = _get_json('projection')
    skip = int(bottle.request.query.get('skip', 0))
    limit = int(bottle.request.query.get('limit', 100))
    sort = _get_json('sort')

    cursor = mongodb[collection].find(
        filter=filter_, projection=projection, skip=skip, limit=limit,
        sort=sort
    )

    return {'results': [document for document in cursor]}


if __name__ == '__main__':
    app.run()
Install the plugin to the app.
Bugfix: Install the plugin to the app.
Python
mit
cloudbuy/kala,damoxc/kala,cheng93/kala
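The one-line diff in this record is easy to miss but real: bottle.install() registers the plugin on Bottle's implicit default application, while the routes in kala.py hang off the explicit app = bottle.Bottle() instance, so only app.install() lets the mongodb argument reach them. A minimal reproduction of the pitfall with a toy plugin (illustrative only, not from the kala codebase):

import bottle

app = bottle.Bottle()

class GreetingPlugin(object):
    # Minimal Bottle API-v2 plugin that injects a 'greeting' keyword.
    name = 'greeting'
    api = 2

    def apply(self, callback, route):
        def wrapper(*args, **kwargs):
            kwargs['greeting'] = 'hello'
            return callback(*args, **kwargs)
        return wrapper

app.install(GreetingPlugin())  # with bottle.install() instead, the route
                               # below fails: 'greeting' is never injected

@app.route('/')
def index(greeting):
    return greeting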
822bc71bc8153db2b403bcaadb65755e7563ddb2
src/mmw/apps/modeling/migrations/0020_old_scenarios.py
src/mmw/apps/modeling/migrations/0020_old_scenarios.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


def clear_old_scenario_results(apps, schema_editor):
    Scenario = apps.get_model('modeling', 'Scenario')

    old_scenarios = Scenario.objects.filter(
        project__model_package='tr-55'
    ).filter(
        results__contains='"result": {"pc_modified"'
    )

    for scenario in old_scenarios:
        scenario.results = '[]'
        scenario.modification_hash = ''
        scenario.save()


class Migration(migrations.Migration):

    dependencies = [
        ('modeling', '0019_project_gis_data'),
    ]

    operations = [
        migrations.RunPython(clear_old_scenario_results)
    ]
Clear outdated cached results in old scenarios
Clear outdated cached results in old scenarios

Their values will be recalculated and cached in the new format by the
front-end whenever they are loaded next.
Python
apache-2.0
kdeloach/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed,WikiWatershed/model-my-watershed,kdeloach/model-my-watershed,WikiWatershed/model-my-watershed
629459d8b8780e4ecca3104034cdeccdd7cccfb3
src/olympia/yara/migrations/0003_auto_20191010_1446.py
src/olympia/yara/migrations/0003_auto_20191010_1446.py
from django.db import migrations
from django.db.models import Case, Value, When


def backfill_has_matches(apps, schema_editor):
    YaraResult = apps.get_model('yara', 'YaraResult')

    YaraResult.objects.filter(has_matches=None).update(
        has_matches=Case(
            When(matches='[]', then=Value(False)),
            default=Value(True)
        )
    )


class Migration(migrations.Migration):

    dependencies = [
        ('yara', '0002_auto_20191009_1239'),
    ]

    operations = [
        migrations.RunPython(backfill_has_matches),
    ]
Add data migration to backfill YaraResult.has_matches
Add data migration to backfill YaraResult.has_matches
Python
bsd-3-clause
bqbn/addons-server,mozilla/addons-server,psiinon/addons-server,mozilla/addons-server,wagnerand/addons-server,mozilla/olympia,mozilla/olympia,psiinon/addons-server,wagnerand/addons-server,psiinon/addons-server,bqbn/addons-server,diox/olympia,mozilla/olympia,bqbn/addons-server,mozilla/addons-server,eviljeff/olympia,eviljeff/olympia,wagnerand/addons-server,mozilla/olympia,diox/olympia,mozilla/addons-server,bqbn/addons-server,eviljeff/olympia,diox/olympia,diox/olympia,eviljeff/olympia,psiinon/addons-server,wagnerand/addons-server
Add data migration to backfill YaraResult.has_matches
from django.db import migrations from django.db.models import Case, Value, When def backfill_has_matches(apps, schema_editor): YaraResult = apps.get_model('yara', 'YaraResult') YaraResult.objects.filter(has_matches=None).update( has_matches=Case( When(matches='[]', then=Value(False)), default=Value(True) ) ) class Migration(migrations.Migration): dependencies = [ ('yara', '0002_auto_20191009_1239'), ] operations = [ migrations.RunPython(backfill_has_matches), ]
<commit_before><commit_msg>Add data migration to backfill YaraResult.has_matches<commit_after>
from django.db import migrations from django.db.models import Case, Value, When def backfill_has_matches(apps, schema_editor): YaraResult = apps.get_model('yara', 'YaraResult') YaraResult.objects.filter(has_matches=None).update( has_matches=Case( When(matches='[]', then=Value(False)), default=Value(True) ) ) class Migration(migrations.Migration): dependencies = [ ('yara', '0002_auto_20191009_1239'), ] operations = [ migrations.RunPython(backfill_has_matches), ]
Add data migration to backfill YaraResult.has_matchesfrom django.db import migrations from django.db.models import Case, Value, When def backfill_has_matches(apps, schema_editor): YaraResult = apps.get_model('yara', 'YaraResult') YaraResult.objects.filter(has_matches=None).update( has_matches=Case( When(matches='[]', then=Value(False)), default=Value(True) ) ) class Migration(migrations.Migration): dependencies = [ ('yara', '0002_auto_20191009_1239'), ] operations = [ migrations.RunPython(backfill_has_matches), ]
<commit_before><commit_msg>Add data migration to backfill YaraResult.has_matches<commit_after>from django.db import migrations from django.db.models import Case, Value, When def backfill_has_matches(apps, schema_editor): YaraResult = apps.get_model('yara', 'YaraResult') YaraResult.objects.filter(has_matches=None).update( has_matches=Case( When(matches='[]', then=Value(False)), default=Value(True) ) ) class Migration(migrations.Migration): dependencies = [ ('yara', '0002_auto_20191009_1239'), ] operations = [ migrations.RunPython(backfill_has_matches), ]
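Editor's note on the row above: the `Case`/`When` expression performs the backfill in a single set-based SQL UPDATE. For readers unfamiliar with that API, the per-row logic it expresses looks like the sketch below — an illustration only, not part of the commit, and far slower on large tables.

def backfill_has_matches_rowwise(apps, schema_editor):
    # Per-row equivalent of the set-based Case/When UPDATE above
    # (illustration only; the migration itself does NOT iterate).
    YaraResult = apps.get_model('yara', 'YaraResult')
    for result in YaraResult.objects.filter(has_matches=None):
        # `matches` holds a JSON string; '[]' means the scan found nothing.
        result.has_matches = result.matches != '[]'
        result.save()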
92dda83e9fe2c33639867c35a97c2b536e53278c
prep.py
prep.py
from os import listdir from os.path import join def file_paths(data_path): return [join(data_path, name) for name in listdir(data_path)] def training_data(data_path): paths = file_paths(data_path) raw_text = [ open(path, 'r').read() for path in paths] dataX = [] dataY = [] for text in raw_text: data = split_data(text) dataX.append(data[0]) dataY.append(data[1]) return dataX, dataY # split inputs and outputs from data def split_data(text): lines = text.split('\n') # first line without first character (#) input_text = lines.pop(0)[1:] # the rest of the text output_text = '\n'.join(lines) return input_text, output_text
Read data and split inputs and outputs
Read data and split inputs and outputs
Python
mit
vdragan1993/python-coder
Read data and split inputs and outputs
from os import listdir from os.path import join def file_paths(data_path): return [join(data_path, name) for name in listdir(data_path)] def training_data(data_path): paths = file_paths(data_path) raw_text = [ open(path, 'r').read() for path in paths] dataX = [] dataY = [] for text in raw_text: data = split_data(text) dataX.append(data[0]) dataY.append(data[1]) return dataX, dataY # split inputs and outputs from data def split_data(text): lines = text.split('\n') # first line without first character (#) input_text = lines.pop(0)[1:] # the rest of the text output_text = '\n'.join(lines) return input_text, output_text
<commit_before><commit_msg>Read data and split inputs and outputs<commit_after>
from os import listdir from os.path import join def file_paths(data_path): return [join(data_path, name) for name in listdir(data_path)] def training_data(data_path): paths = file_paths(data_path) raw_text = [ open(path, 'r').read() for path in paths] dataX = [] dataY = [] for text in raw_text: data = split_data(text) dataX.append(data[0]) dataY.append(data[1]) return dataX, dataY # split inputs and outputs from data def split_data(text): lines = text.split('\n') # first line without first character (#) input_text = lines.pop(0)[1:] # the rest of the text output_text = '\n'.join(lines) return input_text, output_text
Read data and split inputs and outputsfrom os import listdir from os.path import join def file_paths(data_path): return [join(data_path, name) for name in listdir(data_path)] def training_data(data_path): paths = file_paths(data_path) raw_text = [ open(path, 'r').read() for path in paths] dataX = [] dataY = [] for text in raw_text: data = split_data(text) dataX.append(data[0]) dataY.append(data[1]) return dataX, dataY # split inputs and outputs from data def split_data(text): lines = text.split('\n') # first line without first character (#) input_text = lines.pop(0)[1:] # the rest of the text output_text = '\n'.join(lines) return input_text, output_text
<commit_before><commit_msg>Read data and split inputs and outputs<commit_after>from os import listdir from os.path import join def file_paths(data_path): return [join(data_path, name) for name in listdir(data_path)] def training_data(data_path): paths = file_paths(data_path) raw_text = [ open(path, 'r').read() for path in paths] dataX = [] dataY = [] for text in raw_text: data = split_data(text) dataX.append(data[0]) dataY.append(data[1]) return dataX, dataY # split inputs and outputs from data def split_data(text): lines = text.split('\n') # first line without first character (#) input_text = lines.pop(0)[1:] # the rest of the text output_text = '\n'.join(lines) return input_text, output_text
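Editor's note on the row above: a quick usage sketch for `split_data`. The sample text and the importable module name `prep` are assumptions for illustration, not part of the commit.

from prep import split_data

# Hypothetical sample: the first line carries the commented-out task
# description, the remaining lines the expected program.
sample = "#print hello world\nprint('hello world')\n"
input_text, output_text = split_data(sample)
assert input_text == "print hello world"
assert output_text == "print('hello world')\n"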
597000dac85ef0760e04f3c6d885bde531fa86a2
Lib/test/crashers/decref_before_assignment.py
Lib/test/crashers/decref_before_assignment.py
""" General example for an attack against code like this: Py_DECREF(obj->attr); obj->attr = ...; here in Module/_json.c:scanner_init(). Explanation: if the first Py_DECREF() calls either a __del__ or a weakref callback, it will run while the 'obj' appears to have in 'obj->attr' still the old reference to the object, but not holding the reference count any more. Status: progress has been made replacing these cases, but there is an infinite number of such cases. """ import _json, weakref class Ctx1(object): encoding = "utf8" strict = None object_hook = None object_pairs_hook = None parse_float = None parse_int = None parse_constant = None class Foo(unicode): pass def delete_me(*args): print scanner.encoding.__dict__ class Ctx2(Ctx1): @property def encoding(self): global wref f = Foo("utf8") f.abc = globals() wref = weakref.ref(f, delete_me) return f scanner = _json.make_scanner(Ctx1()) scanner.__init__(Ctx2())
Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";
Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";
""" General example for an attack against code like this: Py_DECREF(obj->attr); obj->attr = ...; here in Module/_json.c:scanner_init(). Explanation: if the first Py_DECREF() calls either a __del__ or a weakref callback, it will run while the 'obj' appears to have in 'obj->attr' still the old reference to the object, but not holding the reference count any more. Status: progress has been made replacing these cases, but there is an infinite number of such cases. """ import _json, weakref class Ctx1(object): encoding = "utf8" strict = None object_hook = None object_pairs_hook = None parse_float = None parse_int = None parse_constant = None class Foo(unicode): pass def delete_me(*args): print scanner.encoding.__dict__ class Ctx2(Ctx1): @property def encoding(self): global wref f = Foo("utf8") f.abc = globals() wref = weakref.ref(f, delete_me) return f scanner = _json.make_scanner(Ctx1()) scanner.__init__(Ctx2())
<commit_before><commit_msg>Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";<commit_after>
""" General example for an attack against code like this: Py_DECREF(obj->attr); obj->attr = ...; here in Module/_json.c:scanner_init(). Explanation: if the first Py_DECREF() calls either a __del__ or a weakref callback, it will run while the 'obj' appears to have in 'obj->attr' still the old reference to the object, but not holding the reference count any more. Status: progress has been made replacing these cases, but there is an infinite number of such cases. """ import _json, weakref class Ctx1(object): encoding = "utf8" strict = None object_hook = None object_pairs_hook = None parse_float = None parse_int = None parse_constant = None class Foo(unicode): pass def delete_me(*args): print scanner.encoding.__dict__ class Ctx2(Ctx1): @property def encoding(self): global wref f = Foo("utf8") f.abc = globals() wref = weakref.ref(f, delete_me) return f scanner = _json.make_scanner(Ctx1()) scanner.__init__(Ctx2())
Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";""" General example for an attack against code like this: Py_DECREF(obj->attr); obj->attr = ...; here in Module/_json.c:scanner_init(). Explanation: if the first Py_DECREF() calls either a __del__ or a weakref callback, it will run while the 'obj' appears to have in 'obj->attr' still the old reference to the object, but not holding the reference count any more. Status: progress has been made replacing these cases, but there is an infinite number of such cases. """ import _json, weakref class Ctx1(object): encoding = "utf8" strict = None object_hook = None object_pairs_hook = None parse_float = None parse_int = None parse_constant = None class Foo(unicode): pass def delete_me(*args): print scanner.encoding.__dict__ class Ctx2(Ctx1): @property def encoding(self): global wref f = Foo("utf8") f.abc = globals() wref = weakref.ref(f, delete_me) return f scanner = _json.make_scanner(Ctx1()) scanner.__init__(Ctx2())
<commit_before><commit_msg>Add a crasher for the documented issue of calling "Py_DECREF(self->xxx)";<commit_after>""" General example for an attack against code like this: Py_DECREF(obj->attr); obj->attr = ...; here in Module/_json.c:scanner_init(). Explanation: if the first Py_DECREF() calls either a __del__ or a weakref callback, it will run while the 'obj' appears to have in 'obj->attr' still the old reference to the object, but not holding the reference count any more. Status: progress has been made replacing these cases, but there is an infinite number of such cases. """ import _json, weakref class Ctx1(object): encoding = "utf8" strict = None object_hook = None object_pairs_hook = None parse_float = None parse_int = None parse_constant = None class Foo(unicode): pass def delete_me(*args): print scanner.encoding.__dict__ class Ctx2(Ctx1): @property def encoding(self): global wref f = Foo("utf8") f.abc = globals() wref = weakref.ref(f, delete_me) return f scanner = _json.make_scanner(Ctx1()) scanner.__init__(Ctx2())
aaa546cf6a0ba15a0d1b4a8d7ad5b3c8d349f9b8
tidy.py
tidy.py
#!/usr/bin/env python ''' Script for reflowing C code. This script is intended to be run on arbitrarily messy - but valid - C code, which it then attempts to reformat into something more readable. Usage is pretty straightforward. Note that you will need a recent version of Clang to avoid common bugs in its AST printing mode. ''' import re, subprocess, sys def main(): for h in ['--help', '-h', '-?']: if h in sys.argv[1:]: print 'Usage: %s args...\n' \ ' Any arguments will be interpreted as parameters to the Clang frontend.\n' \ ' E.g. -triple arm-none-eabi if you are parsing ARM code.' % sys.argv[0] return 0 try: output = subprocess.check_output( ['clang', '-cc1', '-ast-print'] + sys.argv[1:]) except OSError: print >>sys.stderr, 'Clang not found' return -1 except subprocess.CalledProcessError as e: # Clang threw an error return e.returncode # http://llvm.org/bugs/show_bug.cgi?id=21106 output = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));', r'\1register \2 \3 \5 = \4;', output) # http://lists.cs.uiuc.edu/pipermail/cfe-dev/2014-September/039366.html # The algorithm below isn't bulletproof, but it's good enough for our # purposes for now. offset = 0 asm_re = re.compile(r'(\sasm\s+(?:volatile\s*)?)\((.*)\);') arg_re = re.compile(r'(".*?"\s)([^,:]+)\s?([,:])') while True: asm_block = asm_re.search(output, offset) if asm_block is None: break prefix = asm_block.group(1) args = arg_re.sub(r'\1(\2) \3', asm_block.group(2)) replacement = '%s (%s);' % (prefix, args) output = output[:asm_block.start()] + replacement + output[asm_block.end():] offset = asm_block.start() + len(replacement) print output return 0 if __name__ == '__main__': sys.exit(main())
Add a tool for cleaning up the messy C output by the pruner.
Add a tool for cleaning up the messy C output by the pruner.
Python
bsd-2-clause
seL4/pruner
Add a tool for cleaning up the messy C output by the pruner.
#!/usr/bin/env python ''' Script for reflowing C code. This script is intended to be run on arbitrarily messy - but valid - C code, which it then attempts to reformat into something more readable. Usage is pretty straightforward. Note that you will need a recent version of Clang to avoid common bugs in its AST printing mode. ''' import re, subprocess, sys def main(): for h in ['--help', '-h', '-?']: if h in sys.argv[1:]: print 'Usage: %s args...\n' \ ' Any arguments will be interpreted as parameters to the Clang frontend.\n' \ ' E.g. -triple arm-none-eabi if you are parsing ARM code.' % sys.argv[0] return 0 try: output = subprocess.check_output( ['clang', '-cc1', '-ast-print'] + sys.argv[1:]) except OSError: print >>sys.stderr, 'Clang not found' return -1 except subprocess.CalledProcessError as e: # Clang threw an error return e.returncode # http://llvm.org/bugs/show_bug.cgi?id=21106 output = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));', r'\1register \2 \3 \5 = \4;', output) # http://lists.cs.uiuc.edu/pipermail/cfe-dev/2014-September/039366.html # The algorithm below isn't bulletproof, but it's good enough for our # purposes for now. offset = 0 asm_re = re.compile(r'(\sasm\s+(?:volatile\s*)?)\((.*)\);') arg_re = re.compile(r'(".*?"\s)([^,:]+)\s?([,:])') while True: asm_block = asm_re.search(output, offset) if asm_block is None: break prefix = asm_block.group(1) args = arg_re.sub(r'\1(\2) \3', asm_block.group(2)) replacement = '%s (%s);' % (prefix, args) output = output[:asm_block.start()] + replacement + output[asm_block.end():] offset = asm_block.start() + len(replacement) print output return 0 if __name__ == '__main__': sys.exit(main())
<commit_before><commit_msg>Add a tool for cleaning up the messy C output by the pruner.<commit_after>
#!/usr/bin/env python ''' Script for reflowing C code. This script is intended to be run on arbitrarily messy - but valid - C code, which it then attempts to reformat into something more readable. Usage is pretty straightforward. Note that you will need a recent version of Clang to avoid common bugs in its AST printing mode. ''' import re, subprocess, sys def main(): for h in ['--help', '-h', '-?']: if h in sys.argv[1:]: print 'Usage: %s args...\n' \ ' Any arguments will be interpreted as parameters to the Clang frontend.\n' \ ' E.g. -triple arm-none-eabi if you are parsing ARM code.' % sys.argv[0] return 0 try: output = subprocess.check_output( ['clang', '-cc1', '-ast-print'] + sys.argv[1:]) except OSError: print >>sys.stderr, 'Clang not found' return -1 except subprocess.CalledProcessError as e: # Clang threw an error return e.returncode # http://llvm.org/bugs/show_bug.cgi?id=21106 output = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));', r'\1register \2 \3 \5 = \4;', output) # http://lists.cs.uiuc.edu/pipermail/cfe-dev/2014-September/039366.html # The algorithm below isn't bulletproof, but it's good enough for our # purposes for now. offset = 0 asm_re = re.compile(r'(\sasm\s+(?:volatile\s*)?)\((.*)\);') arg_re = re.compile(r'(".*?"\s)([^,:]+)\s?([,:])') while True: asm_block = asm_re.search(output, offset) if asm_block is None: break prefix = asm_block.group(1) args = arg_re.sub(r'\1(\2) \3', asm_block.group(2)) replacement = '%s (%s);' % (prefix, args) output = output[:asm_block.start()] + replacement + output[asm_block.end():] offset = asm_block.start() + len(replacement) print output return 0 if __name__ == '__main__': sys.exit(main())
Add a tool for cleaning up the messy C output by the pruner.#!/usr/bin/env python ''' Script for reflowing C code. This script is intended to be run on arbitrarily messy - but valid - C code, which it then attempts to reformat into something more readable. Usage is pretty straightforward. Note that you will need a recent version of Clang to avoid common bugs in its AST printing mode. ''' import re, subprocess, sys def main(): for h in ['--help', '-h', '-?']: if h in sys.argv[1:]: print 'Usage: %s args...\n' \ ' Any arguments will be interpreted as parameters to the Clang frontend.\n' \ ' E.g. -triple arm-none-eabi if you are parsing ARM code.' % sys.argv[0] return 0 try: output = subprocess.check_output( ['clang', '-cc1', '-ast-print'] + sys.argv[1:]) except OSError: print >>sys.stderr, 'Clang not found' return -1 except subprocess.CalledProcessError as e: # Clang threw an error return e.returncode # http://llvm.org/bugs/show_bug.cgi?id=21106 output = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));', r'\1register \2 \3 \5 = \4;', output) # http://lists.cs.uiuc.edu/pipermail/cfe-dev/2014-September/039366.html # The algorithm below isn't bulletproof, but it's good enough for our # purposes for now. offset = 0 asm_re = re.compile(r'(\sasm\s+(?:volatile\s*)?)\((.*)\);') arg_re = re.compile(r'(".*?"\s)([^,:]+)\s?([,:])') while True: asm_block = asm_re.search(output, offset) if asm_block is None: break prefix = asm_block.group(1) args = arg_re.sub(r'\1(\2) \3', asm_block.group(2)) replacement = '%s (%s);' % (prefix, args) output = output[:asm_block.start()] + replacement + output[asm_block.end():] offset = asm_block.start() + len(replacement) print output return 0 if __name__ == '__main__': sys.exit(main())
<commit_before><commit_msg>Add a tool for cleaning up the messy C output by the pruner.<commit_after>#!/usr/bin/env python ''' Script for reflowing C code. This script is intended to be run on arbitrarily messy - but valid - C code, which it then attempts to reformat into something more readable. Usage is pretty straightforward. Note that you will need a recent version of Clang to avoid common bugs in its AST printing mode. ''' import re, subprocess, sys def main(): for h in ['--help', '-h', '-?']: if h in sys.argv[1:]: print 'Usage: %s args...\n' \ ' Any arguments will be interpreted as parameters to the Clang frontend.\n' \ ' E.g. -triple arm-none-eabi if you are parsing ARM code.' % sys.argv[0] return 0 try: output = subprocess.check_output( ['clang', '-cc1', '-ast-print'] + sys.argv[1:]) except OSError: print >>sys.stderr, 'Clang not found' return -1 except subprocess.CalledProcessError as e: # Clang threw an error return e.returncode # http://llvm.org/bugs/show_bug.cgi?id=21106 output = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));', r'\1register \2 \3 \5 = \4;', output) # http://lists.cs.uiuc.edu/pipermail/cfe-dev/2014-September/039366.html # The algorithm below isn't bulletproof, but it's good enough for our # purposes for now. offset = 0 asm_re = re.compile(r'(\sasm\s+(?:volatile\s*)?)\((.*)\);') arg_re = re.compile(r'(".*?"\s)([^,:]+)\s?([,:])') while True: asm_block = asm_re.search(output, offset) if asm_block is None: break prefix = asm_block.group(1) args = arg_re.sub(r'\1(\2) \3', asm_block.group(2)) replacement = '%s (%s);' % (prefix, args) output = output[:asm_block.start()] + replacement + output[asm_block.end():] offset = asm_block.start() + len(replacement) print output return 0 if __name__ == '__main__': sys.exit(main())
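Editor's note on the row above: to see what the first substitution in tidy.py does, here is the same regex applied to a made-up input line (a sketch, not part of the commit). It moves the asm register annotation ahead of the initializer, working around LLVM bug 21106.

import re

line = ' register int r0 = value asm("r0");'
fixed = re.sub(r'(\s)register\s+([^ ]+)\s+([^ ]+)\s*=\s*(.+)\s+(asm\(".*"\));',
               r'\1register \2 \3 \5 = \4;', line)
# The annotation now precedes the initializer:
assert fixed == ' register int r0 asm("r0") = value;'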
15a4b183c8c48d1960a23d8efa09eee42f882c14
tools/skp/page_sets/skia_micrographygirlsvg_desktop.py
tools/skp/page_sets/skia_micrographygirlsvg_desktop.py
# Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry import story from telemetry.page import page as page_module from telemetry.page import shared_page_state class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, name=url, page_set=page_set, shared_page_state_class=shared_page_state.SharedDesktopPageState) self.archive_data_file = 'data/skia_micrographygirlsvg_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.Navigate(self.url) action_runner.Wait(15) class SkiaMicrographygirlsvgDesktopPageSet(story.StorySet): def __init__(self): super(SkiaMicrographygirlsvgDesktopPageSet, self).__init__( archive_data_file='data/skia_micrographygirlsvg_desktop.json') urls_list = [ # Why: skbug.com/10752 'https://storage.googleapis.com/skia-recreateskps-hosted-pages/micrography.svg', ] for url in urls_list: self.AddStory(SkiaBuildbotDesktopPage(url, self))
Add digital micrography svg to the RecreateSKPs bot
Add digital micrography svg to the RecreateSKPs bot Bug: skia:10752 Change-Id: I15d6bb189d44eddbd8bbe2133a2bd705f5313a75 Reviewed-on: https://skia-review.googlesource.com/c/skia/+/319028 Reviewed-by: Chris Dalton <ef7fc3a08ada7d31f16e9a18b5f3c728256e041e@google.com> Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
Python
bsd-3-clause
aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,google/skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,aosp-mirror/platform_external_skia,google/skia,google/skia,google/skia,google/skia,aosp-mirror/platform_external_skia,google/skia,aosp-mirror/platform_external_skia,google/skia
Add digital micrography svg to the RecreateSKPs bot Bug: skia:10752 Change-Id: I15d6bb189d44eddbd8bbe2133a2bd705f5313a75 Reviewed-on: https://skia-review.googlesource.com/c/skia/+/319028 Reviewed-by: Chris Dalton <ef7fc3a08ada7d31f16e9a18b5f3c728256e041e@google.com> Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com>
# Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry import story from telemetry.page import page as page_module from telemetry.page import shared_page_state class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, name=url, page_set=page_set, shared_page_state_class=shared_page_state.SharedDesktopPageState) self.archive_data_file = 'data/skia_micrographygirlsvg_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.Navigate(self.url) action_runner.Wait(15) class SkiaMicrographygirlsvgDesktopPageSet(story.StorySet): def __init__(self): super(SkiaMicrographygirlsvgDesktopPageSet, self).__init__( archive_data_file='data/skia_micrographygirlsvg_desktop.json') urls_list = [ # Why: skbug.com/10752 'https://storage.googleapis.com/skia-recreateskps-hosted-pages/micrography.svg', ] for url in urls_list: self.AddStory(SkiaBuildbotDesktopPage(url, self))
<commit_before><commit_msg>Add digital micrography svg to the RecreateSKPs bot Bug: skia:10752 Change-Id: I15d6bb189d44eddbd8bbe2133a2bd705f5313a75 Reviewed-on: https://skia-review.googlesource.com/c/skia/+/319028 Reviewed-by: Chris Dalton <ef7fc3a08ada7d31f16e9a18b5f3c728256e041e@google.com> Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com><commit_after>
# Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry import story from telemetry.page import page as page_module from telemetry.page import shared_page_state class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, name=url, page_set=page_set, shared_page_state_class=shared_page_state.SharedDesktopPageState) self.archive_data_file = 'data/skia_micrographygirlsvg_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.Navigate(self.url) action_runner.Wait(15) class SkiaMicrographygirlsvgDesktopPageSet(story.StorySet): def __init__(self): super(SkiaMicrographygirlsvgDesktopPageSet, self).__init__( archive_data_file='data/skia_micrographygirlsvg_desktop.json') urls_list = [ # Why: skbug.com/10752 'https://storage.googleapis.com/skia-recreateskps-hosted-pages/micrography.svg', ] for url in urls_list: self.AddStory(SkiaBuildbotDesktopPage(url, self))
Add digital micrography svg to the RecreateSKPs bot Bug: skia:10752 Change-Id: I15d6bb189d44eddbd8bbe2133a2bd705f5313a75 Reviewed-on: https://skia-review.googlesource.com/c/skia/+/319028 Reviewed-by: Chris Dalton <ef7fc3a08ada7d31f16e9a18b5f3c728256e041e@google.com> Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com># Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry import story from telemetry.page import page as page_module from telemetry.page import shared_page_state class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, name=url, page_set=page_set, shared_page_state_class=shared_page_state.SharedDesktopPageState) self.archive_data_file = 'data/skia_micrographygirlsvg_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.Navigate(self.url) action_runner.Wait(15) class SkiaMicrographygirlsvgDesktopPageSet(story.StorySet): def __init__(self): super(SkiaMicrographygirlsvgDesktopPageSet, self).__init__( archive_data_file='data/skia_micrographygirlsvg_desktop.json') urls_list = [ # Why: skbug.com/10752 'https://storage.googleapis.com/skia-recreateskps-hosted-pages/micrography.svg', ] for url in urls_list: self.AddStory(SkiaBuildbotDesktopPage(url, self))
<commit_before><commit_msg>Add digital micrography svg to the RecreateSKPs bot Bug: skia:10752 Change-Id: I15d6bb189d44eddbd8bbe2133a2bd705f5313a75 Reviewed-on: https://skia-review.googlesource.com/c/skia/+/319028 Reviewed-by: Chris Dalton <ef7fc3a08ada7d31f16e9a18b5f3c728256e041e@google.com> Commit-Queue: Ravi Mistry <9fa2e7438b8cb730f96b74865492597170561628@google.com><commit_after># Copyright 2020 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. # pylint: disable=W0401,W0614 from telemetry import story from telemetry.page import page as page_module from telemetry.page import shared_page_state class SkiaBuildbotDesktopPage(page_module.Page): def __init__(self, url, page_set): super(SkiaBuildbotDesktopPage, self).__init__( url=url, name=url, page_set=page_set, shared_page_state_class=shared_page_state.SharedDesktopPageState) self.archive_data_file = 'data/skia_micrographygirlsvg_desktop.json' def RunNavigateSteps(self, action_runner): action_runner.Navigate(self.url) action_runner.Wait(15) class SkiaMicrographygirlsvgDesktopPageSet(story.StorySet): def __init__(self): super(SkiaMicrographygirlsvgDesktopPageSet, self).__init__( archive_data_file='data/skia_micrographygirlsvg_desktop.json') urls_list = [ # Why: skbug.com/10752 'https://storage.googleapis.com/skia-recreateskps-hosted-pages/micrography.svg', ] for url in urls_list: self.AddStory(SkiaBuildbotDesktopPage(url, self))
330c4d257f987b72e23592308d0af6567e2f0064
snippets/keras/mnist_cross_validation.py
snippets/keras/mnist_cross_validation.py
import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils import pandas as pd from sklearn.model_selection import StratifiedKFold batch_size = 128 nb_classes = 10 nb_epoch = 20 n_splits = 10 # the data, shuffled and split between train and test sets (X_cv, y_cv), (X_test, y_test) = mnist.load_data() X_cv = X_cv.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_cv = X_cv.astype('float32') X_test = X_test.astype('float32') X_cv /= 255 X_test /= 255 print(X_cv.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_cv = np_utils.to_categorical(y_cv, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) def create_model(): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) return model skf = StratifiedKFold(n_splits=n_splits, shuffle=True) results = pd.DataFrame(columns=['loss', 'acc']) result_stats = None for i, indices in enumerate(skf.split(X_cv, y_cv)): idx_train, idx_val = indices print("Running fold", i + 1, "/", n_splits) model = create_model() if i == 0: model.summary() X_train, Y_train = X_cv[idx_train], Y_cv[idx_train] X_val, Y_val = X_cv[idx_val], Y_cv[idx_val] model.fit( X_train, Y_train, validation_data=(X_val, Y_val), batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) results.loc[i] = model.evaluate(X_val, Y_val, verbose=0) result_stats = pd.DataFrame({'mean': results.mean(axis=0), 'std': results.std(axis=0)}) print(result_stats) print(results)
Add a working example of StratifiedKFold cross-validation on MNIST with Keras.
Add a working example of StratifiedKFold cross-validation on MNIST with Keras. Compute the mean/std of the loss and accuracy metrics over the folds.
Python
mit
bzamecnik/ml,bzamecnik/ml,bzamecnik/ml,bzamecnik/ml-playground,bzamecnik/ml-playground
Add a working example of StratifiedKFold cross-validation on MNIST with Keras. Compute the mean/std of the loss and accuracy metrics over the folds.
import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils import pandas as pd from sklearn.model_selection import StratifiedKFold batch_size = 128 nb_classes = 10 nb_epoch = 20 n_splits = 10 # the data, shuffled and split between train and test sets (X_cv, y_cv), (X_test, y_test) = mnist.load_data() X_cv = X_cv.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_cv = X_cv.astype('float32') X_test = X_test.astype('float32') X_cv /= 255 X_test /= 255 print(X_cv.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_cv = np_utils.to_categorical(y_cv, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) def create_model(): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) return model skf = StratifiedKFold(n_splits=n_splits, shuffle=True) results = pd.DataFrame(columns=['loss', 'acc']) result_stats = None for i, indices in enumerate(skf.split(X_cv, y_cv)): idx_train, idx_val = indices print("Running fold", i + 1, "/", n_splits) model = create_model() if i == 0: model.summary() X_train, Y_train = X_cv[idx_train], Y_cv[idx_train] X_val, Y_val = X_cv[idx_val], Y_cv[idx_val] model.fit( X_train, Y_train, validation_data=(X_val, Y_val), batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) results.loc[i] = model.evaluate(X_val, Y_val, verbose=0) result_stats = pd.DataFrame({'mean': results.mean(axis=0), 'std': results.std(axis=0)}) print(result_stats) print(results)
<commit_before><commit_msg>Add a working example of StratifiedKFold cross-validation on MNIST with Keras. Compute the mean/std of the loss and accuracy metrics over the folds.<commit_after>
import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils import pandas as pd from sklearn.model_selection import StratifiedKFold batch_size = 128 nb_classes = 10 nb_epoch = 20 n_splits = 10 # the data, shuffled and split between train and test sets (X_cv, y_cv), (X_test, y_test) = mnist.load_data() X_cv = X_cv.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_cv = X_cv.astype('float32') X_test = X_test.astype('float32') X_cv /= 255 X_test /= 255 print(X_cv.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_cv = np_utils.to_categorical(y_cv, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) def create_model(): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) return model skf = StratifiedKFold(n_splits=n_splits, shuffle=True) results = pd.DataFrame(columns=['loss', 'acc']) result_stats = None for i, indices in enumerate(skf.split(X_cv, y_cv)): idx_train, idx_val = indices print("Running fold", i + 1, "/", n_splits) model = create_model() if i == 0: model.summary() X_train, Y_train = X_cv[idx_train], Y_cv[idx_train] X_val, Y_val = X_cv[idx_val], Y_cv[idx_val] model.fit( X_train, Y_train, validation_data=(X_val, Y_val), batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) results.loc[i] = model.evaluate(X_val, Y_val, verbose=0) result_stats = pd.DataFrame({'mean': results.mean(axis=0), 'std': results.std(axis=0)}) print(result_stats) print(results)
Add a working example of StratifiedKFold cross-validation on MNIST with Keras. Compute the mean/std of the loss and accuracy metrics over the folds.import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils import pandas as pd from sklearn.model_selection import StratifiedKFold batch_size = 128 nb_classes = 10 nb_epoch = 20 n_splits = 10 # the data, shuffled and split between train and test sets (X_cv, y_cv), (X_test, y_test) = mnist.load_data() X_cv = X_cv.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_cv = X_cv.astype('float32') X_test = X_test.astype('float32') X_cv /= 255 X_test /= 255 print(X_cv.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_cv = np_utils.to_categorical(y_cv, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) def create_model(): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) return model skf = StratifiedKFold(n_splits=n_splits, shuffle=True) results = pd.DataFrame(columns=['loss', 'acc']) result_stats = None for i, indices in enumerate(skf.split(X_cv, y_cv)): idx_train, idx_val = indices print("Running fold", i + 1, "/", n_splits) model = create_model() if i == 0: model.summary() X_train, Y_train = X_cv[idx_train], Y_cv[idx_train] X_val, Y_val = X_cv[idx_val], Y_cv[idx_val] model.fit( X_train, Y_train, validation_data=(X_val, Y_val), batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) results.loc[i] = model.evaluate(X_val, Y_val, verbose=0) result_stats = pd.DataFrame({'mean': results.mean(axis=0), 'std': results.std(axis=0)}) print(result_stats) print(results)
<commit_before><commit_msg>Add a working example of StratifiedKFold cross-validation on MNIST with Keras. Compute the mean/std of the loss and accuracy metrics over the folds.<commit_after>import numpy as np np.random.seed(1337) # for reproducibility from keras.datasets import mnist from keras.models import Sequential from keras.layers.core import Dense, Dropout, Activation from keras.optimizers import RMSprop from keras.utils import np_utils import pandas as pd from sklearn.model_selection import StratifiedKFold batch_size = 128 nb_classes = 10 nb_epoch = 20 n_splits = 10 # the data, shuffled and split between train and test sets (X_cv, y_cv), (X_test, y_test) = mnist.load_data() X_cv = X_cv.reshape(60000, 784) X_test = X_test.reshape(10000, 784) X_cv = X_cv.astype('float32') X_test = X_test.astype('float32') X_cv /= 255 X_test /= 255 print(X_cv.shape[0], 'train samples') print(X_test.shape[0], 'test samples') # convert class vectors to binary class matrices Y_cv = np_utils.to_categorical(y_cv, nb_classes) Y_test = np_utils.to_categorical(y_test, nb_classes) def create_model(): model = Sequential() model.add(Dense(512, input_shape=(784,))) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(512)) model.add(Activation('relu')) model.add(Dropout(0.2)) model.add(Dense(10)) model.add(Activation('softmax')) model.compile(loss='categorical_crossentropy', optimizer=RMSprop(), metrics=['accuracy']) return model skf = StratifiedKFold(n_splits=n_splits, shuffle=True) results = pd.DataFrame(columns=['loss', 'acc']) result_stats = None for i, indices in enumerate(skf.split(X_cv, y_cv)): idx_train, idx_val = indices print("Running fold", i + 1, "/", n_splits) model = create_model() if i == 0: model.summary() X_train, Y_train = X_cv[idx_train], Y_cv[idx_train] X_val, Y_val = X_cv[idx_val], Y_cv[idx_val] model.fit( X_train, Y_train, validation_data=(X_val, Y_val), batch_size=batch_size, nb_epoch=nb_epoch, verbose=0) results.loc[i] = model.evaluate(X_val, Y_val, verbose=0) result_stats = pd.DataFrame({'mean': results.mean(axis=0), 'std': results.std(axis=0)}) print(result_stats) print(results)
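Editor's note on the row above: the script loads `X_test`/`Y_test` but never evaluates on them. A final hold-out check after cross-validation might look like the sketch below — same Keras 1.x API as the row, but not part of the commit.

# Sketch: retrain on the full training split, then report the
# hold-out score on the untouched test set.
final_model = create_model()
final_model.fit(X_cv, Y_cv, batch_size=batch_size,
                nb_epoch=nb_epoch, verbose=0)
test_loss, test_acc = final_model.evaluate(X_test, Y_test, verbose=0)
print('test loss: %.4f, test accuracy: %.4f' % (test_loss, test_acc))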
2ed4205e88f322e52fe2ade40f053c28f8a076a3
csunplugged/tests/utils/test_str_to_bool.py
csunplugged/tests/utils/test_str_to_bool.py
"""Test class for str_to_bool module.""" from django.test import SimpleTestCase from utils.str_to_bool import str_to_bool class StrToBoolTest(SimpleTestCase): """Test class for str_to_bool module.""" def test_true(self): self.assertTrue(str_to_bool("True")) def test_false(self): self.assertFalse(str_to_bool("False")) def test_yes(self): self.assertTrue(str_to_bool("yes")) def test_no(self): self.assertFalse(str_to_bool("no")) def test_other_value(self): self.assertEqual( str_to_bool("example"), "example" ) def test_other_value_invalid_flag(self): self.assertRaises( ValueError, str_to_bool, "example", True )
Add tests for str_to_bool module.
Add tests for str_to_bool module.
Python
mit
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
Add tests for str_to_bool module.
"""Test class for str_to_bool module.""" from django.test import SimpleTestCase from utils.str_to_bool import str_to_bool class StrToBoolTest(SimpleTestCase): """Test class for str_to_bool module.""" def test_true(self): self.assertTrue(str_to_bool("True")) def test_false(self): self.assertFalse(str_to_bool("False")) def test_yes(self): self.assertTrue(str_to_bool("yes")) def test_no(self): self.assertFalse(str_to_bool("no")) def test_other_value(self): self.assertEqual( str_to_bool("example"), "example" ) def test_other_value_invalid_flag(self): self.assertRaises( ValueError, str_to_bool, "example", True )
<commit_before><commit_msg>Add tests for str_to_bool module.<commit_after>
"""Test class for str_to_bool module.""" from django.test import SimpleTestCase from utils.str_to_bool import str_to_bool class StrToBoolTest(SimpleTestCase): """Test class for str_to_bool module.""" def test_true(self): self.assertTrue(str_to_bool("True")) def test_false(self): self.assertFalse(str_to_bool("False")) def test_yes(self): self.assertTrue(str_to_bool("yes")) def test_no(self): self.assertFalse(str_to_bool("no")) def test_other_value(self): self.assertEqual( str_to_bool("example"), "example" ) def test_other_value_invalid_flag(self): self.assertRaises( ValueError, str_to_bool, "example", True )
Add tests for str_to_bool module."""Test class for str_to_bool module.""" from django.test import SimpleTestCase from utils.str_to_bool import str_to_bool class StrToBoolTest(SimpleTestCase): """Test class for str_to_bool module.""" def test_true(self): self.assertTrue(str_to_bool("True")) def test_false(self): self.assertFalse(str_to_bool("False")) def test_yes(self): self.assertTrue(str_to_bool("yes")) def test_no(self): self.assertFalse(str_to_bool("no")) def test_other_value(self): self.assertEqual( str_to_bool("example"), "example" ) def test_other_value_invalid_flag(self): self.assertRaises( ValueError, str_to_bool, "example", True )
<commit_before><commit_msg>Add tests for str_to_bool module.<commit_after>"""Test class for str_to_bool module.""" from django.test import SimpleTestCase from utils.str_to_bool import str_to_bool class StrToBoolTest(SimpleTestCase): """Test class for str_to_bool module.""" def test_true(self): self.assertTrue(str_to_bool("True")) def test_false(self): self.assertFalse(str_to_bool("False")) def test_yes(self): self.assertTrue(str_to_bool("yes")) def test_no(self): self.assertFalse(str_to_bool("no")) def test_other_value(self): self.assertEqual( str_to_bool("example"), "example" ) def test_other_value_invalid_flag(self): self.assertRaises( ValueError, str_to_bool, "example", True )
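Editor's note on the row above: the module under test is not included in the row. Below is a minimal hypothetical sketch of a `str_to_bool` that would satisfy the assertions; the parameter name and the meaning of the second argument are inferred from the tests, not taken from the actual `utils.str_to_bool`.

def str_to_bool(value, raise_exception=False):
    # Inferred behaviour: map common truthy/falsy strings to booleans,
    # pass anything else through unless told to raise instead.
    if value.lower() in ("true", "yes"):
        return True
    if value.lower() in ("false", "no"):
        return False
    if raise_exception:
        raise ValueError("Unrecognised boolean string: {}".format(value))
    return value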
9cb4cabe997590977b9002f731aa07734130d2d6
scikits/learn/glm/benchmarks/lars.py
scikits/learn/glm/benchmarks/lars.py
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) if __name__ == '__main__': print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
Make sure computations do not get executed at import time, so that the tests still run.
BUG: Make sure computations do not get executed at import time, so that the tests still run.
Python
bsd-3-clause
hrjn/scikit-learn,arahuja/scikit-learn,hrjn/scikit-learn,glouppe/scikit-learn,betatim/scikit-learn,Titan-C/scikit-learn,rajat1994/scikit-learn,xavierwu/scikit-learn,amueller/scikit-learn,lucidfrontier45/scikit-learn,PrashntS/scikit-learn,raghavrv/scikit-learn,kylerbrown/scikit-learn,jzt5132/scikit-learn,ndingwall/scikit-learn,zaxtax/scikit-learn,pnedunuri/scikit-learn,ZENGXH/scikit-learn,mojoboss/scikit-learn,thilbern/scikit-learn,fyffyt/scikit-learn,AnasGhrab/scikit-learn,michigraber/scikit-learn,hitszxp/scikit-learn,hlin117/scikit-learn,qifeigit/scikit-learn,jzt5132/scikit-learn,AlexRobson/scikit-learn,sergeyf/scikit-learn,Djabbz/scikit-learn,Myasuka/scikit-learn,f3r/scikit-learn,pompiduskus/scikit-learn,Srisai85/scikit-learn,trungnt13/scikit-learn,maheshakya/scikit-learn,pompiduskus/scikit-learn,bikong2/scikit-learn,nikitasingh981/scikit-learn,ominux/scikit-learn,bikong2/scikit-learn,beepee14/scikit-learn,nhejazi/scikit-learn,yunfeilu/scikit-learn,ivannz/scikit-learn,zorojean/scikit-learn,mfjb/scikit-learn,ishanic/scikit-learn,nesterione/scikit-learn,madjelan/scikit-learn,ltiao/scikit-learn,bhargav/scikit-learn,Sentient07/scikit-learn,ElDeveloper/scikit-learn,aminert/scikit-learn,lin-credible/scikit-learn,quheng/scikit-learn,AIML/scikit-learn,simon-pepin/scikit-learn,joshloyal/scikit-learn,marcocaccin/scikit-learn,anntzer/scikit-learn,jpautom/scikit-learn,loli/semisupervisedforests,gclenaghan/scikit-learn,jblackburne/scikit-learn,vybstat/scikit-learn,fredhusser/scikit-learn,icdishb/scikit-learn,ssaeger/scikit-learn,mfjb/scikit-learn,sgenoud/scikit-learn,imaculate/scikit-learn,deepesch/scikit-learn,mwv/scikit-learn,frank-tancf/scikit-learn,samzhang111/scikit-learn,Akshay0724/scikit-learn,Jimmy-Morzaria/scikit-learn,aetilley/scikit-learn,kmike/scikit-learn,kevin-intel/scikit-learn,AlexandreAbraham/scikit-learn,lenovor/scikit-learn,huobaowangxi/scikit-learn,glemaitre/scikit-learn,NunoEdgarGub1/scikit-learn,jmschrei/scikit-learn,Jimmy-Morzaria/scikit-learn,B3AU/waveTree,anirudhjayaraman/scikit-learn,jjx02230808/project0223,vshtanko/scikit-learn,costypetrisor/scikit-learn,eickenberg/scikit-learn,fabianp/scikit-learn,ahoyosid/scikit-learn,florian-f/sklearn,abhishekgahlot/scikit-learn,madjelan/scikit-learn,ogrisel/scikit-learn,B3AU/waveTree,themrmax/scikit-learn,rsivapr/scikit-learn,dhruv13J/scikit-learn,khkaminska/scikit-learn,eg-zhang/scikit-learn,hainm/scikit-learn,djgagne/scikit-learn,kmike/scikit-learn,larsmans/scikit-learn,ilo10/scikit-learn,ky822/scikit-learn,mattgiguere/scikit-learn,bthirion/scikit-learn,r-mart/scikit-learn,CVML/scikit-learn,zuku1985/scikit-learn,smartscheduling/scikit-learn-categorical-tree,Akshay0724/scikit-learn,theoryno3/scikit-learn,icdishb/scikit-learn,cainiaocome/scikit-learn,zuku1985/scikit-learn,aewhatley/scikit-learn,zaxtax/scikit-learn,DonBeo/scikit-learn,jmschrei/scikit-learn,samzhang111/scikit-learn,ElDeveloper/scikit-learn,ningchi/scikit-learn,mehdidc/scikit-learn,costypetrisor/scikit-learn,fredhusser/scikit-learn,nrhine1/scikit-learn,vshtanko/scikit-learn,betatim/scikit-learn,alexsavio/scikit-learn,ephes/scikit-learn,MartinDelzant/scikit-learn,maheshakya/scikit-learn,sinhrks/scikit-learn,aabadie/scikit-learn,nhejazi/scikit-learn,IshankGulati/scikit-learn,henridwyer/scikit-learn,deepesch/scikit-learn,Srisai85/scikit-learn,RPGOne/scikit-learn,hdmetor/scikit-learn,aflaxman/scikit-learn,davidgbe/scikit-learn,HolgerPeters/scikit-learn,heli522/scikit-learn,xiaoxiamii/scikit-learn,0x0all/scikit-learn,ndingwall/scikit-learn,fengzhyuan/scikit-learn,mattgiguere/sci
kit-learn,cdegroc/scikit-learn,lin-credible/scikit-learn,RachitKansal/scikit-learn,NunoEdgarGub1/scikit-learn,mattgiguere/scikit-learn,bikong2/scikit-learn,aminert/scikit-learn,lin-credible/scikit-learn,treycausey/scikit-learn,kaichogami/scikit-learn,mehdidc/scikit-learn,lazywei/scikit-learn,davidgbe/scikit-learn,vivekmishra1991/scikit-learn,saiwing-yeung/scikit-learn,JeanKossaifi/scikit-learn,ngoix/OCRF,scikit-learn/scikit-learn,andrewnc/scikit-learn,ankurankan/scikit-learn,larsmans/scikit-learn,ominux/scikit-learn,yonglehou/scikit-learn,jseabold/scikit-learn,jakobworldpeace/scikit-learn,qifeigit/scikit-learn,mfjb/scikit-learn,Garrett-R/scikit-learn,jm-begon/scikit-learn,jakobworldpeace/scikit-learn,LiaoPan/scikit-learn,dingocuster/scikit-learn,aflaxman/scikit-learn,ogrisel/scikit-learn,xzh86/scikit-learn,ningchi/scikit-learn,meduz/scikit-learn,hainm/scikit-learn,Obus/scikit-learn,ElDeveloper/scikit-learn,florian-f/sklearn,jseabold/scikit-learn,florian-f/sklearn,walterreade/scikit-learn,mfjb/scikit-learn,pratapvardhan/scikit-learn,liangz0707/scikit-learn,nmayorov/scikit-learn,DSLituiev/scikit-learn,quheng/scikit-learn,OshynSong/scikit-learn,CVML/scikit-learn,wanggang3333/scikit-learn,eickenberg/scikit-learn,liberatorqjw/scikit-learn,pypot/scikit-learn,samuel1208/scikit-learn,waterponey/scikit-learn,nomadcube/scikit-learn,tawsifkhan/scikit-learn,macks22/scikit-learn,murali-munna/scikit-learn,f3r/scikit-learn,massmutual/scikit-learn,sinhrks/scikit-learn,moutai/scikit-learn,luo66/scikit-learn,jakobworldpeace/scikit-learn,sgenoud/scikit-learn,olologin/scikit-learn,mayblue9/scikit-learn,abhishekkrthakur/scikit-learn,henrykironde/scikit-learn,nrhine1/scikit-learn,etkirsch/scikit-learn,krez13/scikit-learn,sumspr/scikit-learn,r-mart/scikit-learn,rexshihaoren/scikit-learn,h2educ/scikit-learn,petosegan/scikit-learn,rahul-c1/scikit-learn,cainiaocome/scikit-learn,fbagirov/scikit-learn,zhenv5/scikit-learn,ankurankan/scikit-learn,cybernet14/scikit-learn,RomainBrault/scikit-learn,ningchi/scikit-learn,wanggang3333/scikit-learn,PrashntS/scikit-learn,jkarnows/scikit-learn,clemkoa/scikit-learn,ningchi/scikit-learn,madjelan/scikit-learn,bthirion/scikit-learn,alvarofierroclavero/scikit-learn,thilbern/scikit-learn,sarahgrogan/scikit-learn,liyu1990/sklearn,fabioticconi/scikit-learn,elkingtonmcb/scikit-learn,olologin/scikit-learn,Jimmy-Morzaria/scikit-learn,jayflo/scikit-learn,MartinDelzant/scikit-learn,mattilyra/scikit-learn,Fireblend/scikit-learn,untom/scikit-learn,abhishekkrthakur/scikit-learn,Clyde-fare/scikit-learn,nhejazi/scikit-learn,procoder317/scikit-learn,Akshay0724/scikit-learn,massmutual/scikit-learn,trungnt13/scikit-learn,ilyes14/scikit-learn,luo66/scikit-learn,vinayak-mehta/scikit-learn,nesterione/scikit-learn,anurag313/scikit-learn,phdowling/scikit-learn,xiaoxiamii/scikit-learn,yyjiang/scikit-learn,mhue/scikit-learn,RachitKansal/scikit-learn,lbishal/scikit-learn,rvraghav93/scikit-learn,eg-zhang/scikit-learn,IssamLaradji/scikit-learn,IshankGulati/scikit-learn,Titan-C/scikit-learn,billy-inn/scikit-learn,fabioticconi/scikit-learn,depet/scikit-learn,tdhopper/scikit-learn,ivannz/scikit-learn,iismd17/scikit-learn,elkingtonmcb/scikit-learn,siutanwong/scikit-learn,heli522/scikit-learn,pypot/scikit-learn,fredhusser/scikit-learn,JosmanPS/scikit-learn,jakirkham/scikit-learn,ishanic/scikit-learn,jpautom/scikit-learn,Akshay0724/scikit-learn,etkirsch/scikit-learn,henridwyer/scikit-learn,toastedcornflakes/scikit-learn,giorgiop/scikit-learn,arabenjamin/scikit-learn,smartscheduling/scikit-learn-categorical-tree,rsi
vapr/scikit-learn,luo66/scikit-learn,466152112/scikit-learn,PatrickOReilly/scikit-learn,rrohan/scikit-learn,huobaowangxi/scikit-learn,shangwuhencc/scikit-learn,Garrett-R/scikit-learn,shyamalschandra/scikit-learn,henridwyer/scikit-learn,waterponey/scikit-learn,waterponey/scikit-learn,wanggang3333/scikit-learn,Lawrence-Liu/scikit-learn,saiwing-yeung/scikit-learn,wazeerzulfikar/scikit-learn,UNR-AERIAL/scikit-learn,jaidevd/scikit-learn,michigraber/scikit-learn,ngoix/OCRF,ldirer/scikit-learn,treycausey/scikit-learn,xyguo/scikit-learn,dhruv13J/scikit-learn,kagayakidan/scikit-learn,sumspr/scikit-learn,simon-pepin/scikit-learn,nomadcube/scikit-learn,rrohan/scikit-learn,mrshu/scikit-learn,ivannz/scikit-learn,AlexRobson/scikit-learn,fbagirov/scikit-learn,jorik041/scikit-learn,huzq/scikit-learn,TomDLT/scikit-learn,massmutual/scikit-learn,ldirer/scikit-learn,MatthieuBizien/scikit-learn,mhdella/scikit-learn,jmschrei/scikit-learn,JosmanPS/scikit-learn,wzbozon/scikit-learn,liberatorqjw/scikit-learn,devanshdalal/scikit-learn,Obus/scikit-learn,nmayorov/scikit-learn,q1ang/scikit-learn,ndingwall/scikit-learn,belltailjp/scikit-learn,ChanderG/scikit-learn,ltiao/scikit-learn,RayMick/scikit-learn,jjx02230808/project0223,victorbergelin/scikit-learn,sanketloke/scikit-learn,russel1237/scikit-learn,dsquareindia/scikit-learn,pkruskal/scikit-learn,rajat1994/scikit-learn,rohanp/scikit-learn,B3AU/waveTree,yanlend/scikit-learn,h2educ/scikit-learn,tawsifkhan/scikit-learn,UNR-AERIAL/scikit-learn,MartinSavc/scikit-learn,466152112/scikit-learn,rexshihaoren/scikit-learn,cauchycui/scikit-learn,evgchz/scikit-learn,ZENGXH/scikit-learn,henrykironde/scikit-learn,saiwing-yeung/scikit-learn,arjoly/scikit-learn,elkingtonmcb/scikit-learn,ltiao/scikit-learn,eickenberg/scikit-learn,jkarnows/scikit-learn,zorroblue/scikit-learn,manhhomienbienthuy/scikit-learn,Nyker510/scikit-learn,krez13/scikit-learn,thientu/scikit-learn,khkaminska/scikit-learn,thientu/scikit-learn,dhruv13J/scikit-learn,herilalaina/scikit-learn,depet/scikit-learn,AIML/scikit-learn,liyu1990/sklearn,idlead/scikit-learn,YinongLong/scikit-learn,kjung/scikit-learn,Clyde-fare/scikit-learn,ngoix/OCRF,r-mart/scikit-learn,shyamalschandra/scikit-learn,ogrisel/scikit-learn,meduz/scikit-learn,lenovor/scikit-learn,dhruv13J/scikit-learn,moutai/scikit-learn,hainm/scikit-learn,billy-inn/scikit-learn,vigilv/scikit-learn,xwolf12/scikit-learn,zihua/scikit-learn,xubenben/scikit-learn,amueller/scikit-learn,jjx02230808/project0223,simon-pepin/scikit-learn,cdegroc/scikit-learn,clemkoa/scikit-learn,ZenDevelopmentSystems/scikit-learn,sarahgrogan/scikit-learn,Djabbz/scikit-learn,espg/scikit-learn,Windy-Ground/scikit-learn,anntzer/scikit-learn,sonnyhu/scikit-learn,sonnyhu/scikit-learn,akionakamura/scikit-learn,henridwyer/scikit-learn,mrshu/scikit-learn,Djabbz/scikit-learn,JsNoNo/scikit-learn,loli/sklearn-ensembletrees,AIML/scikit-learn,xuewei4d/scikit-learn,rrohan/scikit-learn,466152112/scikit-learn,hitszxp/scikit-learn,yyjiang/scikit-learn,yyjiang/scikit-learn,mattilyra/scikit-learn,mblondel/scikit-learn,shenzebang/scikit-learn,pythonvietnam/scikit-learn,Aasmi/scikit-learn,petosegan/scikit-learn,jakirkham/scikit-learn,yask123/scikit-learn,nelson-liu/scikit-learn,quheng/scikit-learn,trungnt13/scikit-learn,BiaDarkia/scikit-learn,potash/scikit-learn,victorbergelin/scikit-learn,ogrisel/scikit-learn,YinongLong/scikit-learn,adamgreenhall/scikit-learn,shahankhatch/scikit-learn,xubenben/scikit-learn,carrillo/scikit-learn,davidgbe/scikit-learn,vortex-ape/scikit-learn,luo66/scikit-learn,AnasGhrab/scikit-lear
n,waterponey/scikit-learn,xuewei4d/scikit-learn,aetilley/scikit-learn,mattilyra/scikit-learn,zaxtax/scikit-learn,potash/scikit-learn,ChanChiChoi/scikit-learn,AlexRobson/scikit-learn,justincassidy/scikit-learn,mlyundin/scikit-learn,jlegendary/scikit-learn,frank-tancf/scikit-learn,amueller/scikit-learn,NelisVerhoef/scikit-learn,alvarofierroclavero/scikit-learn,robbymeals/scikit-learn,loli/sklearn-ensembletrees,siutanwong/scikit-learn,CforED/Machine-Learning,jblackburne/scikit-learn,mhue/scikit-learn,mayblue9/scikit-learn,Myasuka/scikit-learn,chrisburr/scikit-learn,zorojean/scikit-learn,vinayak-mehta/scikit-learn,beepee14/scikit-learn,mjgrav2001/scikit-learn,fabianp/scikit-learn,murali-munna/scikit-learn,ChanChiChoi/scikit-learn,spallavolu/scikit-learn,yanlend/scikit-learn,ZENGXH/scikit-learn,JeanKossaifi/scikit-learn,cybernet14/scikit-learn,beepee14/scikit-learn,ssaeger/scikit-learn,anirudhjayaraman/scikit-learn,spallavolu/scikit-learn,fyffyt/scikit-learn,abimannans/scikit-learn,abhishekgahlot/scikit-learn,3manuek/scikit-learn,marcocaccin/scikit-learn,xwolf12/scikit-learn,jaidevd/scikit-learn,cainiaocome/scikit-learn,russel1237/scikit-learn,glennq/scikit-learn,MohammedWasim/scikit-learn,fredhusser/scikit-learn,rohanp/scikit-learn,yunfeilu/scikit-learn,hitszxp/scikit-learn,AlexandreAbraham/scikit-learn,cauchycui/scikit-learn,fabioticconi/scikit-learn,Aasmi/scikit-learn,kmike/scikit-learn,aewhatley/scikit-learn,zorojean/scikit-learn,Lawrence-Liu/scikit-learn,jorik041/scikit-learn,OshynSong/scikit-learn,aminert/scikit-learn,0asa/scikit-learn,procoder317/scikit-learn,macks22/scikit-learn,aewhatley/scikit-learn,lucidfrontier45/scikit-learn,andrewnc/scikit-learn,nesterione/scikit-learn,vermouthmjl/scikit-learn,RPGOne/scikit-learn,appapantula/scikit-learn,MartinDelzant/scikit-learn,russel1237/scikit-learn,btabibian/scikit-learn,JPFrancoia/scikit-learn,chrsrds/scikit-learn,RachitKansal/scikit-learn,0asa/scikit-learn,xzh86/scikit-learn,carrillo/scikit-learn,abhishekgahlot/scikit-learn,MatthieuBizien/scikit-learn,nomadcube/scikit-learn,xubenben/scikit-learn,cdegroc/scikit-learn,chrisburr/scikit-learn,thientu/scikit-learn,toastedcornflakes/scikit-learn,rahul-c1/scikit-learn,0x0all/scikit-learn,robbymeals/scikit-learn,xwolf12/scikit-learn,theoryno3/scikit-learn,zuku1985/scikit-learn,MechCoder/scikit-learn,sinhrks/scikit-learn,altairpearl/scikit-learn,mehdidc/scikit-learn,dsquareindia/scikit-learn,Clyde-fare/scikit-learn,lbishal/scikit-learn,hrjn/scikit-learn,AlexandreAbraham/scikit-learn,RomainBrault/scikit-learn,voxlol/scikit-learn,tmhm/scikit-learn,alexeyum/scikit-learn,ephes/scikit-learn,tosolveit/scikit-learn,yask123/scikit-learn,depet/scikit-learn,espg/scikit-learn,tosolveit/scikit-learn,altairpearl/scikit-learn,vinayak-mehta/scikit-learn,pythonvietnam/scikit-learn,btabibian/scikit-learn,toastedcornflakes/scikit-learn,hugobowne/scikit-learn,RayMick/scikit-learn,pypot/scikit-learn,rishikksh20/scikit-learn,jseabold/scikit-learn,kylerbrown/scikit-learn,jpautom/scikit-learn,mrshu/scikit-learn,nelson-liu/scikit-learn,Sentient07/scikit-learn,ycaihua/scikit-learn,LohithBlaze/scikit-learn,andaag/scikit-learn,rahuldhote/scikit-learn,dingocuster/scikit-learn,shikhardb/scikit-learn,heli522/scikit-learn,chrsrds/scikit-learn,pv/scikit-learn,MechCoder/scikit-learn,anntzer/scikit-learn,AlexandreAbraham/scikit-learn,aetilley/scikit-learn,kashif/scikit-learn,OshynSong/scikit-learn,liangz0707/scikit-learn,potash/scikit-learn,pianomania/scikit-learn,sumspr/scikit-learn,JsNoNo/scikit-learn,ClimbsRocks/scikit-learn,Nuno
EdgarGub1/scikit-learn,MartinSavc/scikit-learn,manashmndl/scikit-learn,UNR-AERIAL/scikit-learn,jorge2703/scikit-learn,dsquareindia/scikit-learn,arjoly/scikit-learn,herilalaina/scikit-learn,JeanKossaifi/scikit-learn,shikhardb/scikit-learn,cl4rke/scikit-learn,devanshdalal/scikit-learn,robin-lai/scikit-learn,ZENGXH/scikit-learn,iismd17/scikit-learn,ivannz/scikit-learn,pv/scikit-learn,jm-begon/scikit-learn,zhenv5/scikit-learn,anirudhjayaraman/scikit-learn,vibhorag/scikit-learn,MartinSavc/scikit-learn,0x0all/scikit-learn,imaculate/scikit-learn,vigilv/scikit-learn,tmhm/scikit-learn,mjudsp/Tsallis,JsNoNo/scikit-learn,0asa/scikit-learn,jereze/scikit-learn,RayMick/scikit-learn,jorge2703/scikit-learn,Clyde-fare/scikit-learn,roxyboy/scikit-learn,hitszxp/scikit-learn,alexsavio/scikit-learn,kevin-intel/scikit-learn,IndraVikas/scikit-learn,trungnt13/scikit-learn,sumspr/scikit-learn,MohammedWasim/scikit-learn,andaag/scikit-learn,ChanderG/scikit-learn,rajat1994/scikit-learn,alexeyum/scikit-learn,evgchz/scikit-learn,lbishal/scikit-learn,wazeerzulfikar/scikit-learn,thientu/scikit-learn,DonBeo/scikit-learn,untom/scikit-learn,bnaul/scikit-learn,chrsrds/scikit-learn,sergeyf/scikit-learn,pianomania/scikit-learn,shangwuhencc/scikit-learn,poryfly/scikit-learn,devanshdalal/scikit-learn,sinhrks/scikit-learn,plissonf/scikit-learn,sonnyhu/scikit-learn,LohithBlaze/scikit-learn,Garrett-R/scikit-learn,vshtanko/scikit-learn,shusenl/scikit-learn,Barmaley-exe/scikit-learn,xubenben/scikit-learn,f3r/scikit-learn,Windy-Ground/scikit-learn,jayflo/scikit-learn,vivekmishra1991/scikit-learn,pnedunuri/scikit-learn,tawsifkhan/scikit-learn,trankmichael/scikit-learn,murali-munna/scikit-learn,fabioticconi/scikit-learn,manashmndl/scikit-learn,arjoly/scikit-learn,IssamLaradji/scikit-learn,wazeerzulfikar/scikit-learn,smartscheduling/scikit-learn-categorical-tree,RachitKansal/scikit-learn,BiaDarkia/scikit-learn,roxyboy/scikit-learn,fzalkow/scikit-learn,justincassidy/scikit-learn,Achuth17/scikit-learn,hdmetor/scikit-learn,zihua/scikit-learn,scikit-learn/scikit-learn,jayflo/scikit-learn,arjoly/scikit-learn,jseabold/scikit-learn,mojoboss/scikit-learn,ClimbsRocks/scikit-learn,wlamond/scikit-learn,wanggang3333/scikit-learn,mikebenfield/scikit-learn,espg/scikit-learn,jereze/scikit-learn,huobaowangxi/scikit-learn,treycausey/scikit-learn,lesteve/scikit-learn,scikit-learn/scikit-learn,MechCoder/scikit-learn,vermouthmjl/scikit-learn,mblondel/scikit-learn,abimannans/scikit-learn,spallavolu/scikit-learn,manhhomienbienthuy/scikit-learn,Garrett-R/scikit-learn,xavierwu/scikit-learn,evgchz/scikit-learn,ycaihua/scikit-learn,lbishal/scikit-learn,nvoron23/scikit-learn,stylianos-kampakis/scikit-learn,Achuth17/scikit-learn,eg-zhang/scikit-learn,gotomypc/scikit-learn,fengzhyuan/scikit-learn,mxjl620/scikit-learn,equialgo/scikit-learn,sarahgrogan/scikit-learn,simon-pepin/scikit-learn,nikitasingh981/scikit-learn,jkarnows/scikit-learn,justincassidy/scikit-learn,hdmetor/scikit-learn,vortex-ape/scikit-learn,mattgiguere/scikit-learn,PatrickChrist/scikit-learn,rvraghav93/scikit-learn,0x0all/scikit-learn,trankmichael/scikit-learn,ahoyosid/scikit-learn,wzbozon/scikit-learn,kjung/scikit-learn,kylerbrown/scikit-learn,Barmaley-exe/scikit-learn,vivekmishra1991/scikit-learn,bnaul/scikit-learn,alexeyum/scikit-learn,TomDLT/scikit-learn,roxyboy/scikit-learn,rahuldhote/scikit-learn,bigdataelephants/scikit-learn,scikit-learn/scikit-learn,mwv/scikit-learn,mhdella/scikit-learn,JPFrancoia/scikit-learn,dsullivan7/scikit-learn,yonglehou/scikit-learn,wzbozon/scikit-learn,cwu2011/scikit-
learn,nesterione/scikit-learn,ycaihua/scikit-learn,themrmax/scikit-learn,robin-lai/scikit-learn,btabibian/scikit-learn,rexshihaoren/scikit-learn,petosegan/scikit-learn,iismd17/scikit-learn,pv/scikit-learn,eg-zhang/scikit-learn,poryfly/scikit-learn,tdhopper/scikit-learn,Sentient07/scikit-learn,jorik041/scikit-learn,ashhher3/scikit-learn,rahul-c1/scikit-learn,mhdella/scikit-learn,nmayorov/scikit-learn,mlyundin/scikit-learn,Nyker510/scikit-learn,jzt5132/scikit-learn,macks22/scikit-learn,PatrickOReilly/scikit-learn,PatrickOReilly/scikit-learn,tmhm/scikit-learn,bhargav/scikit-learn,larsmans/scikit-learn,ashhher3/scikit-learn,siutanwong/scikit-learn,loli/sklearn-ensembletrees,loli/semisupervisedforests,djgagne/scikit-learn,appapantula/scikit-learn,kagayakidan/scikit-learn,OshynSong/scikit-learn,samuel1208/scikit-learn,depet/scikit-learn,glennq/scikit-learn,equialgo/scikit-learn,yyjiang/scikit-learn,pkruskal/scikit-learn,abimannans/scikit-learn,ElDeveloper/scikit-learn,vshtanko/scikit-learn,vigilv/scikit-learn,sonnyhu/scikit-learn,DonBeo/scikit-learn,hsuantien/scikit-learn,hsuantien/scikit-learn,fbagirov/scikit-learn,larsmans/scikit-learn,rsivapr/scikit-learn,plissonf/scikit-learn,AlexanderFabisch/scikit-learn,pkruskal/scikit-learn,procoder317/scikit-learn,glemaitre/scikit-learn,wlamond/scikit-learn,vivekmishra1991/scikit-learn,ilyes14/scikit-learn,pratapvardhan/scikit-learn,hlin117/scikit-learn,jakobworldpeace/scikit-learn,ClimbsRocks/scikit-learn,CVML/scikit-learn,poryfly/scikit-learn,fabianp/scikit-learn,krez13/scikit-learn,MechCoder/scikit-learn,ndingwall/scikit-learn,rexshihaoren/scikit-learn,AnasGhrab/scikit-learn,ngoix/OCRF,joernhees/scikit-learn,lesteve/scikit-learn,hrjn/scikit-learn,fabianp/scikit-learn,costypetrisor/scikit-learn,pnedunuri/scikit-learn,Srisai85/scikit-learn,chrsrds/scikit-learn,kagayakidan/scikit-learn,ankurankan/scikit-learn,ycaihua/scikit-learn,YinongLong/scikit-learn,Fireblend/scikit-learn,yonglehou/scikit-learn,jereze/scikit-learn,hugobowne/scikit-learn,RPGOne/scikit-learn,ky822/scikit-learn,roxyboy/scikit-learn,IndraVikas/scikit-learn,vibhorag/scikit-learn,mayblue9/scikit-learn,appapantula/scikit-learn,lucidfrontier45/scikit-learn,yask123/scikit-learn,Titan-C/scikit-learn,glennq/scikit-learn,anntzer/scikit-learn,ishanic/scikit-learn,joernhees/scikit-learn,kashif/scikit-learn,ZenDevelopmentSystems/scikit-learn,arahuja/scikit-learn,btabibian/scikit-learn,maheshakya/scikit-learn,JPFrancoia/scikit-learn,ashhher3/scikit-learn,trankmichael/scikit-learn,Vimos/scikit-learn,toastedcornflakes/scikit-learn,vibhorag/scikit-learn,cwu2011/scikit-learn,jm-begon/scikit-learn,shikhardb/scikit-learn,shahankhatch/scikit-learn,kagayakidan/scikit-learn,cauchycui/scikit-learn,etkirsch/scikit-learn,icdishb/scikit-learn,JeanKossaifi/scikit-learn,anirudhjayaraman/scikit-learn,tomlof/scikit-learn,phdowling/scikit-learn,mhdella/scikit-learn,icdishb/scikit-learn,manashmndl/scikit-learn,abhishekkrthakur/scikit-learn,DSLituiev/scikit-learn,jorge2703/scikit-learn,glouppe/scikit-learn,deepesch/scikit-learn,arahuja/scikit-learn,wlamond/scikit-learn,q1ang/scikit-learn,yanlend/scikit-learn,samuel1208/scikit-learn,stylianos-kampakis/scikit-learn,tosolveit/scikit-learn,tomlof/scikit-learn,Nyker510/scikit-learn,mugizico/scikit-learn,kashif/scikit-learn,ilo10/scikit-learn,Sentient07/scikit-learn,mrshu/scikit-learn,raghavrv/scikit-learn,arabenjamin/scikit-learn,alexeyum/scikit-learn,aetilley/scikit-learn,zihua/scikit-learn,huzq/scikit-learn,stylianos-kampakis/scikit-learn,shikhardb/scikit-learn,saiwing-yeu
ng/scikit-learn,jakirkham/scikit-learn,hsiaoyi0504/scikit-learn,rishikksh20/scikit-learn,kevin-intel/scikit-learn,MatthieuBizien/scikit-learn,mwv/scikit-learn,clemkoa/scikit-learn,jakirkham/scikit-learn,yonglehou/scikit-learn,shusenl/scikit-learn,quheng/scikit-learn,tawsifkhan/scikit-learn,rohanp/scikit-learn,sergeyf/scikit-learn,jayflo/scikit-learn,jmetzen/scikit-learn,CforED/Machine-Learning,ominux/scikit-learn,ashhher3/scikit-learn,jereze/scikit-learn,MartinDelzant/scikit-learn,fbagirov/scikit-learn,nrhine1/scikit-learn,nrhine1/scikit-learn,glemaitre/scikit-learn,mugizico/scikit-learn,zaxtax/scikit-learn,Lawrence-Liu/scikit-learn,BiaDarkia/scikit-learn,lazywei/scikit-learn,cybernet14/scikit-learn,alvarofierroclavero/scikit-learn,michigraber/scikit-learn,victorbergelin/scikit-learn,ChanChiChoi/scikit-learn,adamgreenhall/scikit-learn,dsullivan7/scikit-learn,schets/scikit-learn,untom/scikit-learn,fengzhyuan/scikit-learn,PrashntS/scikit-learn,meduz/scikit-learn,pypot/scikit-learn,pythonvietnam/scikit-learn,manashmndl/scikit-learn,rvraghav93/scikit-learn,vermouthmjl/scikit-learn,CforED/Machine-Learning,NelisVerhoef/scikit-learn,tmhm/scikit-learn,tosolveit/scikit-learn,pv/scikit-learn,costypetrisor/scikit-learn,pythonvietnam/scikit-learn,hsiaoyi0504/scikit-learn,anurag313/scikit-learn,ngoix/OCRF,joernhees/scikit-learn,LohithBlaze/scikit-learn,zihua/scikit-learn,xavierwu/scikit-learn,potash/scikit-learn,ycaihua/scikit-learn,imaculate/scikit-learn,lenovor/scikit-learn,joshloyal/scikit-learn,h2educ/scikit-learn,tdhopper/scikit-learn,florian-f/sklearn,meduz/scikit-learn,MartinSavc/scikit-learn,Srisai85/scikit-learn,sarahgrogan/scikit-learn,jkarnows/scikit-learn,voxlol/scikit-learn,jorge2703/scikit-learn,Garrett-R/scikit-learn,samuel1208/scikit-learn,schets/scikit-learn,hsuantien/scikit-learn,mblondel/scikit-learn,walterreade/scikit-learn,zuku1985/scikit-learn,mehdidc/scikit-learn,Windy-Ground/scikit-learn,joshloyal/scikit-learn,rohanp/scikit-learn,wzbozon/scikit-learn,mjudsp/Tsallis,ishanic/scikit-learn,jmetzen/scikit-learn,manhhomienbienthuy/scikit-learn,liyu1990/sklearn,belltailjp/scikit-learn,loli/sklearn-ensembletrees,dingocuster/scikit-learn,PatrickChrist/scikit-learn,ilo10/scikit-learn,harshaneelhg/scikit-learn,tomlof/scikit-learn,mhue/scikit-learn,mxjl620/scikit-learn,Adai0808/scikit-learn,Adai0808/scikit-learn,fzalkow/scikit-learn,sgenoud/scikit-learn,liberatorqjw/scikit-learn,eickenberg/scikit-learn,zorojean/scikit-learn,stylianos-kampakis/scikit-learn,devanshdalal/scikit-learn,fzalkow/scikit-learn,fyffyt/scikit-learn,adamgreenhall/scikit-learn,andaag/scikit-learn,MatthieuBizien/scikit-learn,ChanderG/scikit-learn,lazywei/scikit-learn,aewhatley/scikit-learn,beepee14/scikit-learn,glemaitre/scikit-learn,florian-f/sklearn,ahoyosid/scikit-learn,anurag313/scikit-learn,iismd17/scikit-learn,Vimos/scikit-learn,jmschrei/scikit-learn,victorbergelin/scikit-learn,jmetzen/scikit-learn,khkaminska/scikit-learn,giorgiop/scikit-learn,altairpearl/scikit-learn,jmetzen/scikit-learn,RayMick/scikit-learn,mugizico/scikit-learn,Fireblend/scikit-learn,vibhorag/scikit-learn,LiaoPan/scikit-learn,andrewnc/scikit-learn,alexsavio/scikit-learn,mojoboss/scikit-learn,Windy-Ground/scikit-learn,heli522/scikit-learn,gclenaghan/scikit-learn,gotomypc/scikit-learn,ssaeger/scikit-learn,gclenaghan/scikit-learn,nvoron23/scikit-learn,pompiduskus/scikit-learn,PatrickChrist/scikit-learn,JsNoNo/scikit-learn,phdowling/scikit-learn,huzq/scikit-learn,marcocaccin/scikit-learn,betatim/scikit-learn,ephes/scikit-learn,xzh86/scikit-learn,arab
enjamin/scikit-learn,RomainBrault/scikit-learn,terkkila/scikit-learn,xuewei4d/scikit-learn,loli/sklearn-ensembletrees,xiaoxiamii/scikit-learn,pianomania/scikit-learn,NunoEdgarGub1/scikit-learn,mrshu/scikit-learn,ZenDevelopmentSystems/scikit-learn,nmayorov/scikit-learn,huobaowangxi/scikit-learn,TomDLT/scikit-learn,sanketloke/scikit-learn,mwv/scikit-learn,xavierwu/scikit-learn,mattilyra/scikit-learn,akionakamura/scikit-learn,giorgiop/scikit-learn,kmike/scikit-learn,rahuldhote/scikit-learn,henrykironde/scikit-learn,theoryno3/scikit-learn,lazywei/scikit-learn,schets/scikit-learn,mxjl620/scikit-learn,cwu2011/scikit-learn,cwu2011/scikit-learn,mikebenfield/scikit-learn,Achuth17/scikit-learn,zorroblue/scikit-learn,fzalkow/scikit-learn,dingocuster/scikit-learn,cauchycui/scikit-learn,mjgrav2001/scikit-learn,mayblue9/scikit-learn,jlegendary/scikit-learn,rahul-c1/scikit-learn,shahankhatch/scikit-learn,xiaoxiamii/scikit-learn,ldirer/scikit-learn,shangwuhencc/scikit-learn,giorgiop/scikit-learn,bigdataelephants/scikit-learn,sergeyf/scikit-learn,dsullivan7/scikit-learn,xwolf12/scikit-learn,h2educ/scikit-learn,billy-inn/scikit-learn,pianomania/scikit-learn,3manuek/scikit-learn,massmutual/scikit-learn,vybstat/scikit-learn,wlamond/scikit-learn,gclenaghan/scikit-learn,arahuja/scikit-learn,MohammedWasim/scikit-learn,plissonf/scikit-learn,cl4rke/scikit-learn,fyffyt/scikit-learn,Obus/scikit-learn,andrewnc/scikit-learn,jaidevd/scikit-learn,themrmax/scikit-learn,mlyundin/scikit-learn,ssaeger/scikit-learn,aflaxman/scikit-learn,jm-begon/scikit-learn,rrohan/scikit-learn,JosmanPS/scikit-learn,khkaminska/scikit-learn,lesteve/scikit-learn,smartscheduling/scikit-learn-categorical-tree,jzt5132/scikit-learn,cl4rke/scikit-learn,eickenberg/scikit-learn,Aasmi/scikit-learn,robbymeals/scikit-learn,mxjl620/scikit-learn,nelson-liu/scikit-learn,glennq/scikit-learn,mlyundin/scikit-learn,thilbern/scikit-learn,deepesch/scikit-learn,dsquareindia/scikit-learn,imaculate/scikit-learn,hlin117/scikit-learn,schets/scikit-learn,PrashntS/scikit-learn,macks22/scikit-learn,sgenoud/scikit-learn,shahankhatch/scikit-learn,plissonf/scikit-learn,mugizico/scikit-learn,adamgreenhall/scikit-learn,3manuek/scikit-learn,kaichogami/scikit-learn,bnaul/scikit-learn,hugobowne/scikit-learn,LiaoPan/scikit-learn,hsuantien/scikit-learn,mattilyra/scikit-learn,Aasmi/scikit-learn,mblondel/scikit-learn,krez13/scikit-learn,djgagne/scikit-learn,TomDLT/scikit-learn,UNR-AERIAL/scikit-learn,ankurankan/scikit-learn,sanketloke/scikit-learn,NelisVerhoef/scikit-learn,ky822/scikit-learn,NelisVerhoef/scikit-learn,AnasGhrab/scikit-learn,cainiaocome/scikit-learn,herilalaina/scikit-learn,loli/semisupervisedforests,espg/scikit-learn,bnaul/scikit-learn,lesteve/scikit-learn,Djabbz/scikit-learn,bigdataelephants/scikit-learn,clemkoa/scikit-learn,CVML/scikit-learn,jblackburne/scikit-learn,treycausey/scikit-learn,Vimos/scikit-learn,AIML/scikit-learn,nvoron23/scikit-learn,pratapvardhan/scikit-learn,hugobowne/scikit-learn,marcocaccin/scikit-learn,f3r/scikit-learn,abimannans/scikit-learn,frank-tancf/scikit-learn,terkkila/scikit-learn,moutai/scikit-learn,liberatorqjw/scikit-learn,LohithBlaze/scikit-learn,rvraghav93/scikit-learn,vortex-ape/scikit-learn,Achuth17/scikit-learn,moutai/scikit-learn,shusenl/scikit-learn,Titan-C/scikit-learn,CforED/Machine-Learning,jblackburne/scikit-learn,PatrickOReilly/scikit-learn,harshaneelhg/scikit-learn,JPFrancoia/scikit-learn,IndraVikas/scikit-learn,jlegendary/scikit-learn,HolgerPeters/scikit-learn,anurag313/scikit-learn,terkkila/scikit-learn,aabadie/scikit-le
arn,nhejazi/scikit-learn,ChanderG/scikit-learn,Myasuka/scikit-learn,hdmetor/scikit-learn,chrisburr/scikit-learn,mjgrav2001/scikit-learn,dsullivan7/scikit-learn,thilbern/scikit-learn,chrisburr/scikit-learn,madjelan/scikit-learn,sanketloke/scikit-learn,kaichogami/scikit-learn,tdhopper/scikit-learn,shangwuhencc/scikit-learn,lin-credible/scikit-learn,equialgo/scikit-learn,zorroblue/scikit-learn,ZenDevelopmentSystems/scikit-learn,trankmichael/scikit-learn,huzq/scikit-learn,xyguo/scikit-learn,Obus/scikit-learn,mjudsp/Tsallis,AlexanderFabisch/scikit-learn,aminert/scikit-learn,tomlof/scikit-learn,lucidfrontier45/scikit-learn,nikitasingh981/scikit-learn,phdowling/scikit-learn,hsiaoyi0504/scikit-learn,hainm/scikit-learn,gotomypc/scikit-learn,etkirsch/scikit-learn,xyguo/scikit-learn,Jimmy-Morzaria/scikit-learn,kjung/scikit-learn,kjung/scikit-learn,russel1237/scikit-learn,theoryno3/scikit-learn,walterreade/scikit-learn,bhargav/scikit-learn,amueller/scikit-learn,vybstat/scikit-learn,xyguo/scikit-learn,cdegroc/scikit-learn,yunfeilu/scikit-learn,robin-lai/scikit-learn,arabenjamin/scikit-learn,appapantula/scikit-learn,olologin/scikit-learn,ilyes14/scikit-learn,abhishekgahlot/scikit-learn,bhargav/scikit-learn,mjudsp/Tsallis,shenzebang/scikit-learn,abhishekkrthakur/scikit-learn,sgenoud/scikit-learn,betatim/scikit-learn,jaidevd/scikit-learn,alvarofierroclavero/scikit-learn,IssamLaradji/scikit-learn,jlegendary/scikit-learn,nikitasingh981/scikit-learn,carrillo/scikit-learn,nomadcube/scikit-learn,aabadie/scikit-learn,davidgbe/scikit-learn,petosegan/scikit-learn,samzhang111/scikit-learn,ominux/scikit-learn,mikebenfield/scikit-learn,wazeerzulfikar/scikit-learn,0x0all/scikit-learn,liangz0707/scikit-learn,rishikksh20/scikit-learn,zhenv5/scikit-learn,loli/semisupervisedforests,mikebenfield/scikit-learn,depet/scikit-learn,AlexRobson/scikit-learn,rsivapr/scikit-learn,vigilv/scikit-learn,Adai0808/scikit-learn,idlead/scikit-learn,jorik041/scikit-learn,nelson-liu/scikit-learn,shusenl/scikit-learn,procoder317/scikit-learn,ephes/scikit-learn,Lawrence-Liu/scikit-learn,jpautom/scikit-learn,hitszxp/scikit-learn,equialgo/scikit-learn,RPGOne/scikit-learn,pompiduskus/scikit-learn,ngoix/OCRF,fengzhyuan/scikit-learn,djgagne/scikit-learn,3manuek/scikit-learn,DSLituiev/scikit-learn,B3AU/waveTree,Adai0808/scikit-learn,ilyes14/scikit-learn,B3AU/waveTree,qifeigit/scikit-learn,michigraber/scikit-learn,vortex-ape/scikit-learn,yanlend/scikit-learn,voxlol/scikit-learn,AlexanderFabisch/scikit-learn,glouppe/scikit-learn,ChanChiChoi/scikit-learn,mojoboss/scikit-learn,RomainBrault/scikit-learn,hsiaoyi0504/scikit-learn,robbymeals/scikit-learn,IssamLaradji/scikit-learn,Vimos/scikit-learn,treycausey/scikit-learn,rsivapr/scikit-learn,q1ang/scikit-learn,andaag/scikit-learn,ilo10/scikit-learn,ltiao/scikit-learn,ldirer/scikit-learn,henrykironde/scikit-learn,466152112/scikit-learn,vermouthmjl/scikit-learn,HolgerPeters/scikit-learn,elkingtonmcb/scikit-learn,kylerbrown/scikit-learn,glouppe/scikit-learn,AlexanderFabisch/scikit-learn,cl4rke/scikit-learn,ankurankan/scikit-learn,PatrickChrist/scikit-learn,harshaneelhg/scikit-learn,idlead/scikit-learn,ahoyosid/scikit-learn,justincassidy/scikit-learn,liyu1990/sklearn,evgchz/scikit-learn,joernhees/scikit-learn,shyamalschandra/scikit-learn,q1ang/scikit-learn,belltailjp/scikit-learn,Nyker510/scikit-learn,frank-tancf/scikit-learn,olologin/scikit-learn,rajat1994/scikit-learn,0asa/scikit-learn,vybstat/scikit-learn,robin-lai/scikit-learn,IshankGulati/scikit-learn,Fireblend/scikit-learn,MohammedWasim/scikit-learn,DonB
eo/scikit-learn,xzh86/scikit-learn,samzhang111/scikit-learn,mhue/scikit-learn,idlead/scikit-learn,akionakamura/scikit-learn,ClimbsRocks/scikit-learn,carrillo/scikit-learn,billy-inn/scikit-learn,Barmaley-exe/scikit-learn,ky822/scikit-learn,yask123/scikit-learn,murali-munna/scikit-learn,mjgrav2001/scikit-learn,joshloyal/scikit-learn,Barmaley-exe/scikit-learn,shenzebang/scikit-learn,gotomypc/scikit-learn,voxlol/scikit-learn,larsmans/scikit-learn,herilalaina/scikit-learn,untom/scikit-learn,bikong2/scikit-learn,xuewei4d/scikit-learn,maheshakya/scikit-learn,BiaDarkia/scikit-learn,aabadie/scikit-learn,qifeigit/scikit-learn,yunfeilu/scikit-learn,kaichogami/scikit-learn,spallavolu/scikit-learn,kashif/scikit-learn,kmike/scikit-learn,Myasuka/scikit-learn,raghavrv/scikit-learn,hlin117/scikit-learn,mjudsp/Tsallis,IshankGulati/scikit-learn,YinongLong/scikit-learn,0asa/scikit-learn,vinayak-mehta/scikit-learn,aflaxman/scikit-learn,r-mart/scikit-learn,shyamalschandra/scikit-learn,lucidfrontier45/scikit-learn,bigdataelephants/scikit-learn,walterreade/scikit-learn,terkkila/scikit-learn,manhhomienbienthuy/scikit-learn,shenzebang/scikit-learn,nvoron23/scikit-learn,akionakamura/scikit-learn,themrmax/scikit-learn,pratapvardhan/scikit-learn,IndraVikas/scikit-learn,harshaneelhg/scikit-learn,altairpearl/scikit-learn,siutanwong/scikit-learn,jjx02230808/project0223,bthirion/scikit-learn,liangz0707/scikit-learn,kevin-intel/scikit-learn,pkruskal/scikit-learn,lenovor/scikit-learn,zhenv5/scikit-learn,rishikksh20/scikit-learn,HolgerPeters/scikit-learn,bthirion/scikit-learn,DSLituiev/scikit-learn,pnedunuri/scikit-learn,poryfly/scikit-learn,abhishekgahlot/scikit-learn,LiaoPan/scikit-learn,evgchz/scikit-learn,maheshakya/scikit-learn,alexsavio/scikit-learn,JosmanPS/scikit-learn,zorroblue/scikit-learn,rahuldhote/scikit-learn,raghavrv/scikit-learn,belltailjp/scikit-learn,cybernet14/scikit-learn
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start BUG: Make sure computations do not get executed at import time, so that the tests still run.
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) if __name__ == '__main__': print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
<commit_before>""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start <commit_msg>BUG: Make sure computations do not get executed at import time, so that the tests still run.<commit_after>
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) if __name__ == '__main__': print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start BUG: Make sure computations do not get executed at import time, so that the tests still run.""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) if __name__ == '__main__': print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
<commit_before>""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start <commit_msg>BUG: Make sure computations do not get executed at import time, so that the tests still run.<commit_after>""" Benchmark for the LARS algorithm. Work in progress """ from datetime import datetime import numpy as np from scikits.learn import glm n, m = 100, 50000 X = np.random.randn(n, m) y = np.random.randn(n) if __name__ == '__main__': print "Computing regularization path using the LARS ..." start = datetime.now() alphas, active, path = glm.lars_path(X, y, method='lasso') print "This took ", datetime.now() - start
e6e42d9bd99dca0b539131fdc27788803f06ee8f
is/clean_pron_dict_csv.py
is/clean_pron_dict_csv.py
#! /usr/bin/python2 # -*- coding: utf-8 -*- # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for tidying the format of WordList_IPA_SAMPA.csv. Example usage: $ ./clean_pron_dict_csv.py ../third_party/pron_dict/WordList_IPA_SAMPA.csv This tidies up the input CSV file and adds row numbers, to simplify processing by subsequent tools. """ from __future__ import unicode_literals import codecs import sys STDIN = codecs.getreader('utf-8')(sys.stdin) STDOUT = codecs.getwriter('utf-8')(sys.stdout) def main(): if len(sys.argv) > 1: reader = codecs.open(sys.argv[1], 'r', 'utf-8') else: reader = STDIN if len(sys.argv) > 2: writer = codecs.open(sys.argv[2], 'w', 'utf-8') else: writer = STDOUT row = 0 for line in reader: row += 1 if 'SAMPA' in line: continue line = line.rstrip('\r\n') fields = line.split(',') assert len(fields) == 4 fields = [f.strip('"') for f in fields] for field in fields: assert '"' not in field if fields[0] == '': continue assert fields[3] in ('', '\\') writer.write('%s\n' % ','.join([str(row)] + fields[0:3])) reader.close() writer.close() return if __name__ == '__main__': main()
Add tool for tidying the Icelandic pron_dict CSV file.
Add tool for tidying the Icelandic pron_dict CSV file.
Python
apache-2.0
google/language-resources,google/language-resources,googlei18n/language-resources,google/language-resources,googlei18n/language-resources,googlei18n/language-resources,google/language-resources,google/language-resources,googlei18n/language-resources,googlei18n/language-resources,googlei18n/language-resources,google/language-resources
Add tool for tidying the Icelandic pron_dict CSV file.
#! /usr/bin/python2 # -*- coding: utf-8 -*- # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for tidying the format of WordList_IPA_SAMPA.csv. Example usage: $ ./clean_pron_dict_csv.py ../third_party/pron_dict/WordList_IPA_SAMPA.csv This tidies up the input CSV file and adds row numbers, to simplify processing by subsequent tools. """ from __future__ import unicode_literals import codecs import sys STDIN = codecs.getreader('utf-8')(sys.stdin) STDOUT = codecs.getwriter('utf-8')(sys.stdout) def main(): if len(sys.argv) > 1: reader = codecs.open(sys.argv[1], 'r', 'utf-8') else: reader = STDIN if len(sys.argv) > 2: writer = codecs.open(sys.argv[2], 'w', 'utf-8') else: writer = STDOUT row = 0 for line in reader: row += 1 if 'SAMPA' in line: continue line = line.rstrip('\r\n') fields = line.split(',') assert len(fields) == 4 fields = [f.strip('"') for f in fields] for field in fields: assert '"' not in field if fields[0] == '': continue assert fields[3] in ('', '\\') writer.write('%s\n' % ','.join([str(row)] + fields[0:3])) reader.close() writer.close() return if __name__ == '__main__': main()
<commit_before><commit_msg>Add tool for tidying the Icelandic pron_dict CSV file.<commit_after>
#! /usr/bin/python2 # -*- coding: utf-8 -*- # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for tidying the format of WordList_IPA_SAMPA.csv. Example usage: $ ./clean_pron_dict_csv.py ../third_party/pron_dict/WordList_IPA_SAMPA.csv This tidies up the input CSV file and adds row numbers, to simplify processing by subsequent tools. """ from __future__ import unicode_literals import codecs import sys STDIN = codecs.getreader('utf-8')(sys.stdin) STDOUT = codecs.getwriter('utf-8')(sys.stdout) def main(): if len(sys.argv) > 1: reader = codecs.open(sys.argv[1], 'r', 'utf-8') else: reader = STDIN if len(sys.argv) > 2: writer = codecs.open(sys.argv[2], 'w', 'utf-8') else: writer = STDOUT row = 0 for line in reader: row += 1 if 'SAMPA' in line: continue line = line.rstrip('\r\n') fields = line.split(',') assert len(fields) == 4 fields = [f.strip('"') for f in fields] for field in fields: assert '"' not in field if fields[0] == '': continue assert fields[3] in ('', '\\') writer.write('%s\n' % ','.join([str(row)] + fields[0:3])) reader.close() writer.close() return if __name__ == '__main__': main()
Add tool for tidying the Icelandic pron_dict CSV file.#! /usr/bin/python2 # -*- coding: utf-8 -*- # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for tidying the format of WordList_IPA_SAMPA.csv. Example usage: $ ./clean_pron_dict_csv.py ../third_party/pron_dict/WordList_IPA_SAMPA.csv This tidies up the input CSV file and adds row numbers, to simplify processing by subsequent tools. """ from __future__ import unicode_literals import codecs import sys STDIN = codecs.getreader('utf-8')(sys.stdin) STDOUT = codecs.getwriter('utf-8')(sys.stdout) def main(): if len(sys.argv) > 1: reader = codecs.open(sys.argv[1], 'r', 'utf-8') else: reader = STDIN if len(sys.argv) > 2: writer = codecs.open(sys.argv[2], 'w', 'utf-8') else: writer = STDOUT row = 0 for line in reader: row += 1 if 'SAMPA' in line: continue line = line.rstrip('\r\n') fields = line.split(',') assert len(fields) == 4 fields = [f.strip('"') for f in fields] for field in fields: assert '"' not in field if fields[0] == '': continue assert fields[3] in ('', '\\') writer.write('%s\n' % ','.join([str(row)] + fields[0:3])) reader.close() writer.close() return if __name__ == '__main__': main()
<commit_before><commit_msg>Add tool for tidying the Icelandic pron_dict CSV file.<commit_after>#! /usr/bin/python2 # -*- coding: utf-8 -*- # # Copyright 2015 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for tidying the format of WordList_IPA_SAMPA.csv. Example usage: $ ./clean_pron_dict_csv.py ../third_party/pron_dict/WordList_IPA_SAMPA.csv This tidies up the input CSV file and adds row numbers, to simplify processing by subsequent tools. """ from __future__ import unicode_literals import codecs import sys STDIN = codecs.getreader('utf-8')(sys.stdin) STDOUT = codecs.getwriter('utf-8')(sys.stdout) def main(): if len(sys.argv) > 1: reader = codecs.open(sys.argv[1], 'r', 'utf-8') else: reader = STDIN if len(sys.argv) > 2: writer = codecs.open(sys.argv[2], 'w', 'utf-8') else: writer = STDOUT row = 0 for line in reader: row += 1 if 'SAMPA' in line: continue line = line.rstrip('\r\n') fields = line.split(',') assert len(fields) == 4 fields = [f.strip('"') for f in fields] for field in fields: assert '"' not in field if fields[0] == '': continue assert fields[3] in ('', '\\') writer.write('%s\n' % ','.join([str(row)] + fields[0:3])) reader.close() writer.close() return if __name__ == '__main__': main()
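The cleaner above strips quotes and joins fields by hand. As an illustrative alternative only, not part of the recorded commit, the same transform can be sketched with Python 3's csv module. The four-column layout (word, IPA, SAMPA, trailing flag) is assumed from the script's asserts, and this tolerant variant skips malformed rows where the original asserts:

import csv
import sys


def clean(src, dst):
    out = csv.writer(dst)
    # Enumerate from 1 so skipped rows (header, blanks) still consume a row
    # number, matching the running line counter in the recorded script.
    for row_number, fields in enumerate(csv.reader(src), start=1):
        if any('SAMPA' in field for field in fields):
            continue  # header row
        if len(fields) != 4 or not fields[0]:
            continue  # blank or malformed entry; the original asserts instead
        out.writerow([row_number] + fields[:3])


if __name__ == '__main__':
    clean(sys.stdin, sys.stdout)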
d956cfb04a1ae3c84460cec224dd4f5aba4012e1
lintcode/Medium/104_Merge_k_Sorted_Lists.py
lintcode/Medium/104_Merge_k_Sorted_Lists.py
""" Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param lists: a list of ListNode @return: The head of one sorted list. """ def mergeKLists(self, lists): # write your code here # Solution 1 (TLE) # if (not lists): # return None # heads = [] # dummy = ListNode(0) # tmp = dummy # for l in lists: # if (l): # heads.append(l) # while(tmp): # mini = None # index = None # for i, h in enumerate(heads): # if (h and (mini is None or h.val < mini.val)): # mini = h # index = i # if (index >= 0): # if (heads[index].next is None): # heads.pop(index) # else: # heads[index] = heads[index].next # tmp.next = mini # tmp = tmp.next # return dummy.next # Solution 2 # def mergeTwoList(h1, h2): # dummy = ListNode(0) # tmp = dummy # while (h1 and h2): # if (h1.val < h2.val): # tmp.next = h1 # h1 = h1.next # else: # tmp.next = h2 # h2 = h2.next # tmp = tmp.next # if (not h1): # tmp.next = h2 # if (not h2): # tmp.next = h1 # return dummy.next # if (not lists): # return None # end = len(lists) - 1 # while (end > 0): # start = 0 # while (start < end): # lists[start] = mergeTwoList(lists[start], lists[end]) # start += 1 # end -= 1 # return lists[0] # Solution 3 q = [] dummy = ListNode(0) tmp = dummy def insertNode(n, arr): start = 0 end = len(arr) - 1 if (len(arr) == 0): arr.append(n) return True while (start < end): mid = (start + end) / 2 if (arr[mid].val == n.val): arr.insert(mid + 1, n) return True elif (arr[mid].val > n.val): end = mid - 1 else: start = mid + 1 if (arr[start].val < n.val): arr.insert(start+1, n) else: arr.insert(start, n) for node in lists: if (node): insertNode(node, q) while (q): node = q.pop(0) if (node.next): insertNode(node.next, q) tmp.next = node tmp = tmp.next return dummy.next
Add solution to lintcode question 104
Add solution to lintcode question 104
Python
mit
Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode,Rhadow/leetcode
Add solution to lintcode question 104
""" Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param lists: a list of ListNode @return: The head of one sorted list. """ def mergeKLists(self, lists): # write your code here # Solution 1 (TLE) # if (not lists): # return None # heads = [] # dummy = ListNode(0) # tmp = dummy # for l in lists: # if (l): # heads.append(l) # while(tmp): # mini = None # index = None # for i, h in enumerate(heads): # if (h and (mini is None or h.val < mini.val)): # mini = h # index = i # if (index >= 0): # if (heads[index].next is None): # heads.pop(index) # else: # heads[index] = heads[index].next # tmp.next = mini # tmp = tmp.next # return dummy.next # Solution 2 # def mergeTwoList(h1, h2): # dummy = ListNode(0) # tmp = dummy # while (h1 and h2): # if (h1.val < h2.val): # tmp.next = h1 # h1 = h1.next # else: # tmp.next = h2 # h2 = h2.next # tmp = tmp.next # if (not h1): # tmp.next = h2 # if (not h2): # tmp.next = h1 # return dummy.next # if (not lists): # return None # end = len(lists) - 1 # while (end > 0): # start = 0 # while (start < end): # lists[start] = mergeTwoList(lists[start], lists[end]) # start += 1 # end -= 1 # return lists[0] # Solution 3 q = [] dummy = ListNode(0) tmp = dummy def insertNode(n, arr): start = 0 end = len(arr) - 1 if (len(arr) == 0): arr.append(n) return True while (start < end): mid = (start + end) / 2 if (arr[mid].val == n.val): arr.insert(mid + 1, n) return True elif (arr[mid].val > n.val): end = mid - 1 else: start = mid + 1 if (arr[start].val < n.val): arr.insert(start+1, n) else: arr.insert(start, n) for node in lists: if (node): insertNode(node, q) while (q): node = q.pop(0) if (node.next): insertNode(node.next, q) tmp.next = node tmp = tmp.next return dummy.next
<commit_before><commit_msg>Add solution to lintcode question 104<commit_after>
""" Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param lists: a list of ListNode @return: The head of one sorted list. """ def mergeKLists(self, lists): # write your code here # Solution 1 (TLE) # if (not lists): # return None # heads = [] # dummy = ListNode(0) # tmp = dummy # for l in lists: # if (l): # heads.append(l) # while(tmp): # mini = None # index = None # for i, h in enumerate(heads): # if (h and (mini is None or h.val < mini.val)): # mini = h # index = i # if (index >= 0): # if (heads[index].next is None): # heads.pop(index) # else: # heads[index] = heads[index].next # tmp.next = mini # tmp = tmp.next # return dummy.next # Solution 2 # def mergeTwoList(h1, h2): # dummy = ListNode(0) # tmp = dummy # while (h1 and h2): # if (h1.val < h2.val): # tmp.next = h1 # h1 = h1.next # else: # tmp.next = h2 # h2 = h2.next # tmp = tmp.next # if (not h1): # tmp.next = h2 # if (not h2): # tmp.next = h1 # return dummy.next # if (not lists): # return None # end = len(lists) - 1 # while (end > 0): # start = 0 # while (start < end): # lists[start] = mergeTwoList(lists[start], lists[end]) # start += 1 # end -= 1 # return lists[0] # Solution 3 q = [] dummy = ListNode(0) tmp = dummy def insertNode(n, arr): start = 0 end = len(arr) - 1 if (len(arr) == 0): arr.append(n) return True while (start < end): mid = (start + end) / 2 if (arr[mid].val == n.val): arr.insert(mid + 1, n) return True elif (arr[mid].val > n.val): end = mid - 1 else: start = mid + 1 if (arr[start].val < n.val): arr.insert(start+1, n) else: arr.insert(start, n) for node in lists: if (node): insertNode(node, q) while (q): node = q.pop(0) if (node.next): insertNode(node.next, q) tmp.next = node tmp = tmp.next return dummy.next
Add solution to lintcode question 104""" Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param lists: a list of ListNode @return: The head of one sorted list. """ def mergeKLists(self, lists): # write your code here # Solution 1 (TLE) # if (not lists): # return None # heads = [] # dummy = ListNode(0) # tmp = dummy # for l in lists: # if (l): # heads.append(l) # while(tmp): # mini = None # index = None # for i, h in enumerate(heads): # if (h and (mini is None or h.val < mini.val)): # mini = h # index = i # if (index >= 0): # if (heads[index].next is None): # heads.pop(index) # else: # heads[index] = heads[index].next # tmp.next = mini # tmp = tmp.next # return dummy.next # Solution 2 # def mergeTwoList(h1, h2): # dummy = ListNode(0) # tmp = dummy # while (h1 and h2): # if (h1.val < h2.val): # tmp.next = h1 # h1 = h1.next # else: # tmp.next = h2 # h2 = h2.next # tmp = tmp.next # if (not h1): # tmp.next = h2 # if (not h2): # tmp.next = h1 # return dummy.next # if (not lists): # return None # end = len(lists) - 1 # while (end > 0): # start = 0 # while (start < end): # lists[start] = mergeTwoList(lists[start], lists[end]) # start += 1 # end -= 1 # return lists[0] # Solution 3 q = [] dummy = ListNode(0) tmp = dummy def insertNode(n, arr): start = 0 end = len(arr) - 1 if (len(arr) == 0): arr.append(n) return True while (start < end): mid = (start + end) / 2 if (arr[mid].val == n.val): arr.insert(mid + 1, n) return True elif (arr[mid].val > n.val): end = mid - 1 else: start = mid + 1 if (arr[start].val < n.val): arr.insert(start+1, n) else: arr.insert(start, n) for node in lists: if (node): insertNode(node, q) while (q): node = q.pop(0) if (node.next): insertNode(node.next, q) tmp.next = node tmp = tmp.next return dummy.next
<commit_before><commit_msg>Add solution to lintcode question 104<commit_after>""" Definition of ListNode class ListNode(object): def __init__(self, val, next=None): self.val = val self.next = next """ class Solution: """ @param lists: a list of ListNode @return: The head of one sorted list. """ def mergeKLists(self, lists): # write your code here # Solution 1 (TLE) # if (not lists): # return None # heads = [] # dummy = ListNode(0) # tmp = dummy # for l in lists: # if (l): # heads.append(l) # while(tmp): # mini = None # index = None # for i, h in enumerate(heads): # if (h and (mini is None or h.val < mini.val)): # mini = h # index = i # if (index >= 0): # if (heads[index].next is None): # heads.pop(index) # else: # heads[index] = heads[index].next # tmp.next = mini # tmp = tmp.next # return dummy.next # Solution 2 # def mergeTwoList(h1, h2): # dummy = ListNode(0) # tmp = dummy # while (h1 and h2): # if (h1.val < h2.val): # tmp.next = h1 # h1 = h1.next # else: # tmp.next = h2 # h2 = h2.next # tmp = tmp.next # if (not h1): # tmp.next = h2 # if (not h2): # tmp.next = h1 # return dummy.next # if (not lists): # return None # end = len(lists) - 1 # while (end > 0): # start = 0 # while (start < end): # lists[start] = mergeTwoList(lists[start], lists[end]) # start += 1 # end -= 1 # return lists[0] # Solution 3 q = [] dummy = ListNode(0) tmp = dummy def insertNode(n, arr): start = 0 end = len(arr) - 1 if (len(arr) == 0): arr.append(n) return True while (start < end): mid = (start + end) / 2 if (arr[mid].val == n.val): arr.insert(mid + 1, n) return True elif (arr[mid].val > n.val): end = mid - 1 else: start = mid + 1 if (arr[start].val < n.val): arr.insert(start+1, n) else: arr.insert(start, n) for node in lists: if (node): insertNode(node, q) while (q): node = q.pop(0) if (node.next): insertNode(node.next, q) tmp.next = node tmp = tmp.next return dummy.next
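Solution 3 in the record keeps a sorted buffer with binary search plus list.insert, which is O(N*k) overall because every insert and pop(0) shifts the buffer. The conventional O(N log k) alternative is a min-heap; a Python 3 sketch using the same ListNode shape as the record:

import heapq


class ListNode:
    def __init__(self, val, next=None):
        self.val = val
        self.next = next


def merge_k_lists(lists):
    # Heap entries carry a sequence number as a tie-breaker, because
    # ListNode objects themselves are not orderable.
    heap = [(node.val, i, node) for i, node in enumerate(lists) if node]
    heapq.heapify(heap)
    dummy = tail = ListNode(0)
    while heap:
        _, i, node = heapq.heappop(heap)
        tail.next = node
        tail = node
        if node.next:
            heapq.heappush(heap, (node.next.val, i, node.next))
    return dummy.next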
e04213330dda10ab1ec114e158ad854bdedd7058
Doc/tools/cvsinfo.py
Doc/tools/cvsinfo.py
"""Utility class and function to get information about the CVS repository based on checked-out files. """ import os def get_repository_list(paths): d = {} for name in paths: if os.path.isfile(name): dir = os.path.dirname(name) else: dir = name rootfile = os.path.join(name, "CVS", "Root") root = open(rootfile).readline().strip() if not d.has_key(root): d[root] = RepositoryInfo(dir), [name] else: d[root][1].append(name) return d.values() class RepositoryInfo: """Record holding information about the repository we want to talk to.""" cvsroot_path = None branch = None # type is '', ':ext', or ':pserver:' type = "" def __init__(self, dir=None): if dir is None: dir = os.getcwd() dir = os.path.join(dir, "CVS") root = open(os.path.join(dir, "Root")).readline().strip() if root.startswith(":pserver:"): self.type = ":pserver:" root = root[len(":pserver:"):] elif ":" in root: if root.startswith(":ext:"): root = root[len(":ext:"):] self.type = ":ext:" self.repository = root if ":" in root: host, path = root.split(":", 1) self.cvsroot_path = path else: self.cvsroot_path = root fn = os.path.join(dir, "Tag") if os.path.isfile(fn): self.branch = open(fn).readline().strip()[1:] def get_cvsroot(self): return self.type + self.repository _repository_dir_cache = {} def get_repository_file(self, path): filename = os.path.abspath(path) if os.path.isdir(path): dir = path join = 0 else: dir = os.path.dirname(path) join = 1 try: repodir = self._repository_dir_cache[dir] except KeyError: repofn = os.path.join(dir, "CVS", "Repository") repodir = open(repofn).readline().strip() repodir = os.path.join(self.cvsroot_path, repodir) self._repository_dir_cache[dir] = repodir if join: fn = os.path.join(repodir, os.path.basename(path)) else: fn = repodir return fn[len(self.cvsroot_path)+1:] def __repr__(self): return "<RepositoryInfo for %s>" % `self.get_cvsroot()`
Support module to help work with checked-out CVS trees.
Support module to help work with checked-out CVS trees.
Python
mit
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
Support module to help work with checked-out CVS trees.
"""Utility class and function to get information about the CVS repository based on checked-out files. """ import os def get_repository_list(paths): d = {} for name in paths: if os.path.isfile(name): dir = os.path.dirname(name) else: dir = name rootfile = os.path.join(name, "CVS", "Root") root = open(rootfile).readline().strip() if not d.has_key(root): d[root] = RepositoryInfo(dir), [name] else: d[root][1].append(name) return d.values() class RepositoryInfo: """Record holding information about the repository we want to talk to.""" cvsroot_path = None branch = None # type is '', ':ext', or ':pserver:' type = "" def __init__(self, dir=None): if dir is None: dir = os.getcwd() dir = os.path.join(dir, "CVS") root = open(os.path.join(dir, "Root")).readline().strip() if root.startswith(":pserver:"): self.type = ":pserver:" root = root[len(":pserver:"):] elif ":" in root: if root.startswith(":ext:"): root = root[len(":ext:"):] self.type = ":ext:" self.repository = root if ":" in root: host, path = root.split(":", 1) self.cvsroot_path = path else: self.cvsroot_path = root fn = os.path.join(dir, "Tag") if os.path.isfile(fn): self.branch = open(fn).readline().strip()[1:] def get_cvsroot(self): return self.type + self.repository _repository_dir_cache = {} def get_repository_file(self, path): filename = os.path.abspath(path) if os.path.isdir(path): dir = path join = 0 else: dir = os.path.dirname(path) join = 1 try: repodir = self._repository_dir_cache[dir] except KeyError: repofn = os.path.join(dir, "CVS", "Repository") repodir = open(repofn).readline().strip() repodir = os.path.join(self.cvsroot_path, repodir) self._repository_dir_cache[dir] = repodir if join: fn = os.path.join(repodir, os.path.basename(path)) else: fn = repodir return fn[len(self.cvsroot_path)+1:] def __repr__(self): return "<RepositoryInfo for %s>" % `self.get_cvsroot()`
<commit_before><commit_msg>Support module to help work with checked-out CVS trees.<commit_after>
"""Utility class and function to get information about the CVS repository based on checked-out files. """ import os def get_repository_list(paths): d = {} for name in paths: if os.path.isfile(name): dir = os.path.dirname(name) else: dir = name rootfile = os.path.join(name, "CVS", "Root") root = open(rootfile).readline().strip() if not d.has_key(root): d[root] = RepositoryInfo(dir), [name] else: d[root][1].append(name) return d.values() class RepositoryInfo: """Record holding information about the repository we want to talk to.""" cvsroot_path = None branch = None # type is '', ':ext', or ':pserver:' type = "" def __init__(self, dir=None): if dir is None: dir = os.getcwd() dir = os.path.join(dir, "CVS") root = open(os.path.join(dir, "Root")).readline().strip() if root.startswith(":pserver:"): self.type = ":pserver:" root = root[len(":pserver:"):] elif ":" in root: if root.startswith(":ext:"): root = root[len(":ext:"):] self.type = ":ext:" self.repository = root if ":" in root: host, path = root.split(":", 1) self.cvsroot_path = path else: self.cvsroot_path = root fn = os.path.join(dir, "Tag") if os.path.isfile(fn): self.branch = open(fn).readline().strip()[1:] def get_cvsroot(self): return self.type + self.repository _repository_dir_cache = {} def get_repository_file(self, path): filename = os.path.abspath(path) if os.path.isdir(path): dir = path join = 0 else: dir = os.path.dirname(path) join = 1 try: repodir = self._repository_dir_cache[dir] except KeyError: repofn = os.path.join(dir, "CVS", "Repository") repodir = open(repofn).readline().strip() repodir = os.path.join(self.cvsroot_path, repodir) self._repository_dir_cache[dir] = repodir if join: fn = os.path.join(repodir, os.path.basename(path)) else: fn = repodir return fn[len(self.cvsroot_path)+1:] def __repr__(self): return "<RepositoryInfo for %s>" % `self.get_cvsroot()`
Support module to help work with checked-out CVS trees."""Utility class and function to get information about the CVS repository based on checked-out files. """ import os def get_repository_list(paths): d = {} for name in paths: if os.path.isfile(name): dir = os.path.dirname(name) else: dir = name rootfile = os.path.join(name, "CVS", "Root") root = open(rootfile).readline().strip() if not d.has_key(root): d[root] = RepositoryInfo(dir), [name] else: d[root][1].append(name) return d.values() class RepositoryInfo: """Record holding information about the repository we want to talk to.""" cvsroot_path = None branch = None # type is '', ':ext', or ':pserver:' type = "" def __init__(self, dir=None): if dir is None: dir = os.getcwd() dir = os.path.join(dir, "CVS") root = open(os.path.join(dir, "Root")).readline().strip() if root.startswith(":pserver:"): self.type = ":pserver:" root = root[len(":pserver:"):] elif ":" in root: if root.startswith(":ext:"): root = root[len(":ext:"):] self.type = ":ext:" self.repository = root if ":" in root: host, path = root.split(":", 1) self.cvsroot_path = path else: self.cvsroot_path = root fn = os.path.join(dir, "Tag") if os.path.isfile(fn): self.branch = open(fn).readline().strip()[1:] def get_cvsroot(self): return self.type + self.repository _repository_dir_cache = {} def get_repository_file(self, path): filename = os.path.abspath(path) if os.path.isdir(path): dir = path join = 0 else: dir = os.path.dirname(path) join = 1 try: repodir = self._repository_dir_cache[dir] except KeyError: repofn = os.path.join(dir, "CVS", "Repository") repodir = open(repofn).readline().strip() repodir = os.path.join(self.cvsroot_path, repodir) self._repository_dir_cache[dir] = repodir if join: fn = os.path.join(repodir, os.path.basename(path)) else: fn = repodir return fn[len(self.cvsroot_path)+1:] def __repr__(self): return "<RepositoryInfo for %s>" % `self.get_cvsroot()`
<commit_before><commit_msg>Support module to help work with checked-out CVS trees.<commit_after>"""Utility class and function to get information about the CVS repository based on checked-out files. """ import os def get_repository_list(paths): d = {} for name in paths: if os.path.isfile(name): dir = os.path.dirname(name) else: dir = name rootfile = os.path.join(name, "CVS", "Root") root = open(rootfile).readline().strip() if not d.has_key(root): d[root] = RepositoryInfo(dir), [name] else: d[root][1].append(name) return d.values() class RepositoryInfo: """Record holding information about the repository we want to talk to.""" cvsroot_path = None branch = None # type is '', ':ext', or ':pserver:' type = "" def __init__(self, dir=None): if dir is None: dir = os.getcwd() dir = os.path.join(dir, "CVS") root = open(os.path.join(dir, "Root")).readline().strip() if root.startswith(":pserver:"): self.type = ":pserver:" root = root[len(":pserver:"):] elif ":" in root: if root.startswith(":ext:"): root = root[len(":ext:"):] self.type = ":ext:" self.repository = root if ":" in root: host, path = root.split(":", 1) self.cvsroot_path = path else: self.cvsroot_path = root fn = os.path.join(dir, "Tag") if os.path.isfile(fn): self.branch = open(fn).readline().strip()[1:] def get_cvsroot(self): return self.type + self.repository _repository_dir_cache = {} def get_repository_file(self, path): filename = os.path.abspath(path) if os.path.isdir(path): dir = path join = 0 else: dir = os.path.dirname(path) join = 1 try: repodir = self._repository_dir_cache[dir] except KeyError: repofn = os.path.join(dir, "CVS", "Repository") repodir = open(repofn).readline().strip() repodir = os.path.join(self.cvsroot_path, repodir) self._repository_dir_cache[dir] = repodir if join: fn = os.path.join(repodir, os.path.basename(path)) else: fn = repodir return fn[len(self.cvsroot_path)+1:] def __repr__(self): return "<RepositoryInfo for %s>" % `self.get_cvsroot()`
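A hypothetical usage sketch for the module above, kept in the module's own Python 2 idiom since it relies on has_key and backtick repr. The module name, directory paths, and CVS root shown are invented for illustration, and the calls assume a real CVS checkout on disk:

import cvsinfo

for info, names in cvsinfo.get_repository_list(['Doc/api', 'Doc/tools']):
    print info.get_cvsroot()      # e.g. ':pserver:anonymous@cvs.example.org:/cvsroot'
    print 'branch:', info.branch  # read from CVS/Tag, or None on the trunk
    for name in names:
        print info.get_repository_file(name)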
6bc10fd6d00593c3ad192a4bf528e9e01dd605c3
exa/relational/__init__.py
exa/relational/__init__.py
# -*- coding: utf-8 -*- # Copyright (c) 2015-2016, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 """ Relational #################### This (sub)package is provides the content management framework for container objects and a collection of static data for reference and unit conversions. """ from exa.relational.isotope import Isotope #from exa.relational.constant import Constant #from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity #from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy #from exa.relational.unit import Force, Frequency, MolarMass #from exa.relational.project import Project #from exa.relational.job import Job #from exa.relational.file import DataFile, ContainerFile #from exa.relational import test
# -*- coding: utf-8 -*- # Copyright (c) 2015-2016, Exa Analytics Development Team # Distributed under the terms of the Apache License 2.0 """ Relational #################### This (sub)package is provides the content management framework for container objects and a collection of static data for reference and unit conversions. """ #from exa.relational.isotope import Isotope #from exa.relational.constant import Constant #from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity #from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy #from exa.relational.unit import Force, Frequency, MolarMass #from exa.relational.project import Project #from exa.relational.job import Job #from exa.relational.file import DataFile, ContainerFile #from exa.relational import test
Comment out relational for now; working on workflows, parallelism first
Comment out relational for now; working on workflows, parallelism first
Python
apache-2.0
alexvmarch/exa,alexvmarch/exa,exa-analytics/exa,avmarchenko/exa,alexvmarch/exa,tjduigna/exa,exa-analytics/exa
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
Comment out relational for now; working on workflows, parallelism first
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
#from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
<commit_before>
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
<commit_msg>Comment out relational for now; working on workflows, parallelism first<commit_after>
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
#from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
Comment out relational for now; working on workflows, parallelism first
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
#from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
<commit_before>
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
<commit_msg>Comment out relational for now; working on workflows, parallelism first<commit_after>
# -*- coding: utf-8 -*-
# Copyright (c) 2015-2016, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Relational
####################
This (sub)package provides the content management framework for container
objects and a collection of static data for reference and unit conversions.
"""
#from exa.relational.isotope import Isotope
#from exa.relational.constant import Constant
#from exa.relational.unit import Length, Mass, Time, Current, Amount, Luminosity
#from exa.relational.unit import Dose, Acceleration, Charge, Dipole, Energy
#from exa.relational.unit import Force, Frequency, MolarMass
#from exa.relational.project import Project
#from exa.relational.job import Job
#from exa.relational.file import DataFile, ContainerFile
#from exa.relational import test
d43b5aee3d172adf0fc59517e458ee92775b3917
h2o-py/tests/testdir_algos/deepwater/pyunit_demo_deepwater.py
h2o-py/tests/testdir_algos/deepwater/pyunit_demo_deepwater.py
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import h2o, tests
from h2o.estimators.deepwater import H2ODeepWaterEstimator


def deepwater_demo():
    # Training data
    train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
    train_data = train_data.drop('Site')
    train_data['Angaus'] = train_data['Angaus'].asfactor()
    print(train_data.describe())
    train_data.head()

    # Testing data
    test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
    test_data['Angaus'] = test_data['Angaus'].asfactor()
    print(test_data.describe())
    test_data.head()

    # Run DeepWater (ideally, use a GPU - this would be slow on CPUs)
    dl = H2ODeepWaterEstimator(epochs=50, hidden=[4096,4096,4096], hidden_dropout_ratios=[0.2,0.2,0.2])
    dl.train(x=list(range(1,train_data.ncol)), y="Angaus", training_frame=train_data, validation_frame=test_data)
    dl.show()

if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_demo)
else:
    deepwater_demo()
Add pyunit for deepwater example (huge FC network).
Add pyunit for deepwater example (huge FC network).
Python
apache-2.0
jangorecki/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,h2oai/h2o-dev,jangorecki/h2o-3,mathemage/h2o-3,mathemage/h2o-3,h2oai/h2o-3,michalkurka/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,michalkurka/h2o-3,spennihana/h2o-3,mathemage/h2o-3,jangorecki/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,michalkurka/h2o-3,jangorecki/h2o-3,h2oai/h2o-3,mathemage/h2o-3,h2oai/h2o-dev,h2oai/h2o-3,h2oai/h2o-3,h2oai/h2o-dev,spennihana/h2o-3,h2oai/h2o-3,jangorecki/h2o-3,spennihana/h2o-3,spennihana/h2o-3,michalkurka/h2o-3,michalkurka/h2o-3,h2oai/h2o-3,mathemage/h2o-3,mathemage/h2o-3
Add pyunit for deepwater example (huge FC network).
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import h2o, tests
from h2o.estimators.deepwater import H2ODeepWaterEstimator


def deepwater_demo():
    # Training data
    train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
    train_data = train_data.drop('Site')
    train_data['Angaus'] = train_data['Angaus'].asfactor()
    print(train_data.describe())
    train_data.head()

    # Testing data
    test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
    test_data['Angaus'] = test_data['Angaus'].asfactor()
    print(test_data.describe())
    test_data.head()

    # Run DeepWater (ideally, use a GPU - this would be slow on CPUs)
    dl = H2ODeepWaterEstimator(epochs=50, hidden=[4096,4096,4096], hidden_dropout_ratios=[0.2,0.2,0.2])
    dl.train(x=list(range(1,train_data.ncol)), y="Angaus", training_frame=train_data, validation_frame=test_data)
    dl.show()

if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_demo)
else:
    deepwater_demo()
<commit_before><commit_msg>Add pyunit for deepwater example (huge FC network).<commit_after>
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import h2o, tests
from h2o.estimators.deepwater import H2ODeepWaterEstimator


def deepwater_demo():
    # Training data
    train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
    train_data = train_data.drop('Site')
    train_data['Angaus'] = train_data['Angaus'].asfactor()
    print(train_data.describe())
    train_data.head()

    # Testing data
    test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
    test_data['Angaus'] = test_data['Angaus'].asfactor()
    print(test_data.describe())
    test_data.head()

    # Run DeepWater (ideally, use a GPU - this would be slow on CPUs)
    dl = H2ODeepWaterEstimator(epochs=50, hidden=[4096,4096,4096], hidden_dropout_ratios=[0.2,0.2,0.2])
    dl.train(x=list(range(1,train_data.ncol)), y="Angaus", training_frame=train_data, validation_frame=test_data)
    dl.show()

if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_demo)
else:
    deepwater_demo()
Add pyunit for deepwater example (huge FC network).
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import h2o, tests
from h2o.estimators.deepwater import H2ODeepWaterEstimator


def deepwater_demo():
    # Training data
    train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
    train_data = train_data.drop('Site')
    train_data['Angaus'] = train_data['Angaus'].asfactor()
    print(train_data.describe())
    train_data.head()

    # Testing data
    test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
    test_data['Angaus'] = test_data['Angaus'].asfactor()
    print(test_data.describe())
    test_data.head()

    # Run DeepWater (ideally, use a GPU - this would be slow on CPUs)
    dl = H2ODeepWaterEstimator(epochs=50, hidden=[4096,4096,4096], hidden_dropout_ratios=[0.2,0.2,0.2])
    dl.train(x=list(range(1,train_data.ncol)), y="Angaus", training_frame=train_data, validation_frame=test_data)
    dl.show()

if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_demo)
else:
    deepwater_demo()
<commit_before><commit_msg>Add pyunit for deepwater example (huge FC network).<commit_after>
from __future__ import print_function
from builtins import range
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from tests import pyunit_utils
import h2o, tests
from h2o.estimators.deepwater import H2ODeepWaterEstimator


def deepwater_demo():
    # Training data
    train_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_model.csv"))
    train_data = train_data.drop('Site')
    train_data['Angaus'] = train_data['Angaus'].asfactor()
    print(train_data.describe())
    train_data.head()

    # Testing data
    test_data = h2o.import_file(path=tests.locate("smalldata/gbm_test/ecology_eval.csv"))
    test_data['Angaus'] = test_data['Angaus'].asfactor()
    print(test_data.describe())
    test_data.head()

    # Run DeepWater (ideally, use a GPU - this would be slow on CPUs)
    dl = H2ODeepWaterEstimator(epochs=50, hidden=[4096,4096,4096], hidden_dropout_ratios=[0.2,0.2,0.2])
    dl.train(x=list(range(1,train_data.ncol)), y="Angaus", training_frame=train_data, validation_frame=test_data)
    dl.show()

if __name__ == "__main__":
    pyunit_utils.standalone_test(deepwater_demo)
else:
    deepwater_demo()
92556eade9572bcba06c04d9f4d9ec54d173505c
sprint_dates.py
sprint_dates.py
#!/usr/bin/env python
# encoding: utf-8
"""
sprint_dates.py

Created by Rod Docking on 2017-06-13.
Copyright (c) 2017 Canada's Michael Smith Genome Sciences Centre.
All rights reserved.
"""

import argparse
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR


def _parse_args():
    parser = argparse.ArgumentParser(
        description="""Sprint Planning Template""")
    parser.add_argument(
        '-s', '--sprint_num', type=int,
        help='Sprint number', required=True)
    parser.add_argument(
        '-d', '--start_date', type=str,
        help='Sprint start date', required=True)
    args = parser.parse_args()
    return args


def main():
    """Generate dates for upcoming sprints"""

    # Parse command-line arguments
    args = _parse_args()

    # Calculate sprint end dates
    # Use the dateutil package to calculate 'next Friday'
    sprint_num = args.sprint_num
    start_datetime = parse(args.start_date)
    end_datetime = start_datetime + relativedelta(weekday=FR(+2))

    # Print the dates as strings
    i = 0
    while i < 20:
        print "Sprint {num} - {start_date} - {end_date}".format(
            num=sprint_num,
            start_date=start_datetime.date(),
            end_date=end_datetime.date()
        )
        sprint_num += 1
        start_datetime = start_datetime + relativedelta(weeks=2)
        end_datetime = end_datetime + relativedelta(weeks=2)
        i += 1

if __name__ == '__main__':
    main()
Add script for generating sprint dates
Add script for generating sprint dates
Python
mit
rdocking/bits_and_bobs
Add script for generating sprint dates
#!/usr/bin/env python
# encoding: utf-8
"""
sprint_dates.py

Created by Rod Docking on 2017-06-13.
Copyright (c) 2017 Canada's Michael Smith Genome Sciences Centre.
All rights reserved.
"""

import argparse
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR


def _parse_args():
    parser = argparse.ArgumentParser(
        description="""Sprint Planning Template""")
    parser.add_argument(
        '-s', '--sprint_num', type=int,
        help='Sprint number', required=True)
    parser.add_argument(
        '-d', '--start_date', type=str,
        help='Sprint start date', required=True)
    args = parser.parse_args()
    return args


def main():
    """Generate dates for upcoming sprints"""

    # Parse command-line arguments
    args = _parse_args()

    # Calculate sprint end dates
    # Use the dateutil package to calculate 'next Friday'
    sprint_num = args.sprint_num
    start_datetime = parse(args.start_date)
    end_datetime = start_datetime + relativedelta(weekday=FR(+2))

    # Print the dates as strings
    i = 0
    while i < 20:
        print "Sprint {num} - {start_date} - {end_date}".format(
            num=sprint_num,
            start_date=start_datetime.date(),
            end_date=end_datetime.date()
        )
        sprint_num += 1
        start_datetime = start_datetime + relativedelta(weeks=2)
        end_datetime = end_datetime + relativedelta(weeks=2)
        i += 1

if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add script for generating sprint dates<commit_after>
#!/usr/bin/env python
# encoding: utf-8
"""
sprint_dates.py

Created by Rod Docking on 2017-06-13.
Copyright (c) 2017 Canada's Michael Smith Genome Sciences Centre.
All rights reserved.
"""

import argparse
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR


def _parse_args():
    parser = argparse.ArgumentParser(
        description="""Sprint Planning Template""")
    parser.add_argument(
        '-s', '--sprint_num', type=int,
        help='Sprint number', required=True)
    parser.add_argument(
        '-d', '--start_date', type=str,
        help='Sprint start date', required=True)
    args = parser.parse_args()
    return args


def main():
    """Generate dates for upcoming sprints"""

    # Parse command-line arguments
    args = _parse_args()

    # Calculate sprint end dates
    # Use the dateutil package to calculate 'next Friday'
    sprint_num = args.sprint_num
    start_datetime = parse(args.start_date)
    end_datetime = start_datetime + relativedelta(weekday=FR(+2))

    # Print the dates as strings
    i = 0
    while i < 20:
        print "Sprint {num} - {start_date} - {end_date}".format(
            num=sprint_num,
            start_date=start_datetime.date(),
            end_date=end_datetime.date()
        )
        sprint_num += 1
        start_datetime = start_datetime + relativedelta(weeks=2)
        end_datetime = end_datetime + relativedelta(weeks=2)
        i += 1

if __name__ == '__main__':
    main()
Add script for generating sprint dates
#!/usr/bin/env python
# encoding: utf-8
"""
sprint_dates.py

Created by Rod Docking on 2017-06-13.
Copyright (c) 2017 Canada's Michael Smith Genome Sciences Centre.
All rights reserved.
"""

import argparse
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR


def _parse_args():
    parser = argparse.ArgumentParser(
        description="""Sprint Planning Template""")
    parser.add_argument(
        '-s', '--sprint_num', type=int,
        help='Sprint number', required=True)
    parser.add_argument(
        '-d', '--start_date', type=str,
        help='Sprint start date', required=True)
    args = parser.parse_args()
    return args


def main():
    """Generate dates for upcoming sprints"""

    # Parse command-line arguments
    args = _parse_args()

    # Calculate sprint end dates
    # Use the dateutil package to calculate 'next Friday'
    sprint_num = args.sprint_num
    start_datetime = parse(args.start_date)
    end_datetime = start_datetime + relativedelta(weekday=FR(+2))

    # Print the dates as strings
    i = 0
    while i < 20:
        print "Sprint {num} - {start_date} - {end_date}".format(
            num=sprint_num,
            start_date=start_datetime.date(),
            end_date=end_datetime.date()
        )
        sprint_num += 1
        start_datetime = start_datetime + relativedelta(weeks=2)
        end_datetime = end_datetime + relativedelta(weeks=2)
        i += 1

if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add script for generating sprint dates<commit_after>
#!/usr/bin/env python
# encoding: utf-8
"""
sprint_dates.py

Created by Rod Docking on 2017-06-13.
Copyright (c) 2017 Canada's Michael Smith Genome Sciences Centre.
All rights reserved.
"""

import argparse
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR


def _parse_args():
    parser = argparse.ArgumentParser(
        description="""Sprint Planning Template""")
    parser.add_argument(
        '-s', '--sprint_num', type=int,
        help='Sprint number', required=True)
    parser.add_argument(
        '-d', '--start_date', type=str,
        help='Sprint start date', required=True)
    args = parser.parse_args()
    return args


def main():
    """Generate dates for upcoming sprints"""

    # Parse command-line arguments
    args = _parse_args()

    # Calculate sprint end dates
    # Use the dateutil package to calculate 'next Friday'
    sprint_num = args.sprint_num
    start_datetime = parse(args.start_date)
    end_datetime = start_datetime + relativedelta(weekday=FR(+2))

    # Print the dates as strings
    i = 0
    while i < 20:
        print "Sprint {num} - {start_date} - {end_date}".format(
            num=sprint_num,
            start_date=start_datetime.date(),
            end_date=end_datetime.date()
        )
        sprint_num += 1
        start_datetime = start_datetime + relativedelta(weeks=2)
        end_datetime = end_datetime + relativedelta(weeks=2)
        i += 1

if __name__ == '__main__':
    main()
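A minimal, self-contained illustration of the dateutil FR(+2) weekday arithmetic the script above relies on; the sample date is arbitrary and not taken from the record.

from dateutil.parser import parse
from dateutil.relativedelta import relativedelta, FR

start = parse("2017-06-13")                  # a Tuesday
end = start + relativedelta(weekday=FR(+2))  # second Friday on or after start
print(start.date(), "->", end.date())        # 2017-06-13 -> 2017-06-23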
7784e15410e9b3aa0b39d8732fc05aa9cc0d2acf
pyramid_authsanity/tests/test_util.py
pyramid_authsanity/tests/test_util.py
from pyramid_authsanity.util import add_vary_callback


class TestAddVaryCallback(object):
    def _makeOne(self, *varies):
        return add_vary_callback(varies)

    def test_add_single_vary(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary

    def test_add_multiple_vary(self):
        cb = self._makeOne('cookie', 'authorization')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 2
        assert 'cookie' in response.vary
        assert 'authorization' in response.vary

    def test_add_multiple_existing(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        response.vary = ['cookie']
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary


class DummyResponse(object):
    vary = None
Add tests to exercise the vary callback utility
Add tests to exercise the vary callback utility
Python
isc
usingnamespace/pyramid_authsanity
Add tests to exercise the vary callback utility
from pyramid_authsanity.util import add_vary_callback


class TestAddVaryCallback(object):
    def _makeOne(self, *varies):
        return add_vary_callback(varies)

    def test_add_single_vary(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary

    def test_add_multiple_vary(self):
        cb = self._makeOne('cookie', 'authorization')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 2
        assert 'cookie' in response.vary
        assert 'authorization' in response.vary

    def test_add_multiple_existing(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        response.vary = ['cookie']
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary


class DummyResponse(object):
    vary = None
<commit_before><commit_msg>Add tests to exercise the vary callback utility<commit_after>
from pyramid_authsanity.util import add_vary_callback


class TestAddVaryCallback(object):
    def _makeOne(self, *varies):
        return add_vary_callback(varies)

    def test_add_single_vary(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary

    def test_add_multiple_vary(self):
        cb = self._makeOne('cookie', 'authorization')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 2
        assert 'cookie' in response.vary
        assert 'authorization' in response.vary

    def test_add_multiple_existing(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        response.vary = ['cookie']
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary


class DummyResponse(object):
    vary = None
Add tests to exercise the vary callback utility
from pyramid_authsanity.util import add_vary_callback


class TestAddVaryCallback(object):
    def _makeOne(self, *varies):
        return add_vary_callback(varies)

    def test_add_single_vary(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary

    def test_add_multiple_vary(self):
        cb = self._makeOne('cookie', 'authorization')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 2
        assert 'cookie' in response.vary
        assert 'authorization' in response.vary

    def test_add_multiple_existing(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        response.vary = ['cookie']
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary


class DummyResponse(object):
    vary = None
<commit_before><commit_msg>Add tests to exercise the vary callback utility<commit_after>
from pyramid_authsanity.util import add_vary_callback


class TestAddVaryCallback(object):
    def _makeOne(self, *varies):
        return add_vary_callback(varies)

    def test_add_single_vary(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary

    def test_add_multiple_vary(self):
        cb = self._makeOne('cookie', 'authorization')
        response = DummyResponse()
        cb(None, response)
        assert len(response.vary) == 2
        assert 'cookie' in response.vary
        assert 'authorization' in response.vary

    def test_add_multiple_existing(self):
        cb = self._makeOne('cookie')
        response = DummyResponse()
        response.vary = ['cookie']
        cb(None, response)
        assert len(response.vary) == 1
        assert 'cookie' in response.vary


class DummyResponse(object):
    vary = None
689fc658234346f1e44cb4c59c0a1143bf722e6d
scripts/fcn_forward.py
scripts/fcn_forward.py
#!/usr/bin/env python

import os.path as osp

from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
from skimage.color import label2rgb

import fcn
from fcn.models import FCN8s


def main():
    data_dir = fcn.get_data_dir()
    img_file = osp.join(data_dir, 'pascal/VOC2012/JPEGImages/2007_000129.jpg')
    chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')

    train = False

    model = FCN8s()
    S.load_hdf5(chainermodel, model)
    model.to_gpu()

    img = imread(img_file)
    x_data_0 = img.astype(np.float32)
    x_data_0 = x_data_0[:, :, ::-1]  # RGB -> BGR
    x_data_0 -= np.array((104.00698793, 116.66876762, 122.67891434))
    x_data_0 = x_data_0.transpose((2, 0, 1))
    x_data = np.array([x_data_0], dtype=np.float32)
    x_data = cuda.to_gpu(x_data)
    x = Variable(x_data, volatile=not train)

    model.train = train
    pred = model(x)

    pred_datum = cuda.to_cpu(pred.data)[0]
    label = np.argmax(pred_datum, axis=0)
    print('unique labels:', np.unique(label))

    label_viz = label2rgb(label, img, bg_label=0)
    plt.imshow(label_viz)
    plt.show()


if __name__ == '__main__':
    main()
Add forward function of FCN8s
Add forward function of FCN8s
Python
mit
wkentaro/fcn
Add forward function of FCN8s
#!/usr/bin/env python

import os.path as osp

from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
from skimage.color import label2rgb

import fcn
from fcn.models import FCN8s


def main():
    data_dir = fcn.get_data_dir()
    img_file = osp.join(data_dir, 'pascal/VOC2012/JPEGImages/2007_000129.jpg')
    chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')

    train = False

    model = FCN8s()
    S.load_hdf5(chainermodel, model)
    model.to_gpu()

    img = imread(img_file)
    x_data_0 = img.astype(np.float32)
    x_data_0 = x_data_0[:, :, ::-1]  # RGB -> BGR
    x_data_0 -= np.array((104.00698793, 116.66876762, 122.67891434))
    x_data_0 = x_data_0.transpose((2, 0, 1))
    x_data = np.array([x_data_0], dtype=np.float32)
    x_data = cuda.to_gpu(x_data)
    x = Variable(x_data, volatile=not train)

    model.train = train
    pred = model(x)

    pred_datum = cuda.to_cpu(pred.data)[0]
    label = np.argmax(pred_datum, axis=0)
    print('unique labels:', np.unique(label))

    label_viz = label2rgb(label, img, bg_label=0)
    plt.imshow(label_viz)
    plt.show()


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add forward function of FCN8s<commit_after>
#!/usr/bin/env python

import os.path as osp

from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
from skimage.color import label2rgb

import fcn
from fcn.models import FCN8s


def main():
    data_dir = fcn.get_data_dir()
    img_file = osp.join(data_dir, 'pascal/VOC2012/JPEGImages/2007_000129.jpg')
    chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')

    train = False

    model = FCN8s()
    S.load_hdf5(chainermodel, model)
    model.to_gpu()

    img = imread(img_file)
    x_data_0 = img.astype(np.float32)
    x_data_0 = x_data_0[:, :, ::-1]  # RGB -> BGR
    x_data_0 -= np.array((104.00698793, 116.66876762, 122.67891434))
    x_data_0 = x_data_0.transpose((2, 0, 1))
    x_data = np.array([x_data_0], dtype=np.float32)
    x_data = cuda.to_gpu(x_data)
    x = Variable(x_data, volatile=not train)

    model.train = train
    pred = model(x)

    pred_datum = cuda.to_cpu(pred.data)[0]
    label = np.argmax(pred_datum, axis=0)
    print('unique labels:', np.unique(label))

    label_viz = label2rgb(label, img, bg_label=0)
    plt.imshow(label_viz)
    plt.show()


if __name__ == '__main__':
    main()
Add forward function of FCN8s
#!/usr/bin/env python

import os.path as osp

from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
from skimage.color import label2rgb

import fcn
from fcn.models import FCN8s


def main():
    data_dir = fcn.get_data_dir()
    img_file = osp.join(data_dir, 'pascal/VOC2012/JPEGImages/2007_000129.jpg')
    chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')

    train = False

    model = FCN8s()
    S.load_hdf5(chainermodel, model)
    model.to_gpu()

    img = imread(img_file)
    x_data_0 = img.astype(np.float32)
    x_data_0 = x_data_0[:, :, ::-1]  # RGB -> BGR
    x_data_0 -= np.array((104.00698793, 116.66876762, 122.67891434))
    x_data_0 = x_data_0.transpose((2, 0, 1))
    x_data = np.array([x_data_0], dtype=np.float32)
    x_data = cuda.to_gpu(x_data)
    x = Variable(x_data, volatile=not train)

    model.train = train
    pred = model(x)

    pred_datum = cuda.to_cpu(pred.data)[0]
    label = np.argmax(pred_datum, axis=0)
    print('unique labels:', np.unique(label))

    label_viz = label2rgb(label, img, bg_label=0)
    plt.imshow(label_viz)
    plt.show()


if __name__ == '__main__':
    main()
<commit_before><commit_msg>Add forward function of FCN8s<commit_after>
#!/usr/bin/env python

import os.path as osp

from chainer import cuda
import chainer.serializers as S
from chainer import Variable
import matplotlib.pyplot as plt
import numpy as np
from scipy.misc import imread
from skimage.color import label2rgb

import fcn
from fcn.models import FCN8s


def main():
    data_dir = fcn.get_data_dir()
    img_file = osp.join(data_dir, 'pascal/VOC2012/JPEGImages/2007_000129.jpg')
    chainermodel = osp.join(data_dir, 'fcn8s.chainermodel')

    train = False

    model = FCN8s()
    S.load_hdf5(chainermodel, model)
    model.to_gpu()

    img = imread(img_file)
    x_data_0 = img.astype(np.float32)
    x_data_0 = x_data_0[:, :, ::-1]  # RGB -> BGR
    x_data_0 -= np.array((104.00698793, 116.66876762, 122.67891434))
    x_data_0 = x_data_0.transpose((2, 0, 1))
    x_data = np.array([x_data_0], dtype=np.float32)
    x_data = cuda.to_gpu(x_data)
    x = Variable(x_data, volatile=not train)

    model.train = train
    pred = model(x)

    pred_datum = cuda.to_cpu(pred.data)[0]
    label = np.argmax(pred_datum, axis=0)
    print('unique labels:', np.unique(label))

    label_viz = label2rgb(label, img, bg_label=0)
    plt.imshow(label_viz)
    plt.show()


if __name__ == '__main__':
    main()
49d1d8ce0221af7f9732b09370cd0c0ec85b7191
indra/sources/process_mentions.py
indra/sources/process_mentions.py
import json

from indra.statements import Activation, Agent


def process_mentions(fname):
    with open(fname, 'r') as fh:
        jd = json.load(fh)
    mentions = jd['mentions']
    events = [m for m in mentions if m['type'] == 'EventMention']
    events = [e for e in events if len(e['arguments']) == 2]
    events = [e for e in events if 'cause' in e['arguments']]
    stmts = []
    for event in events:
        cause = event['arguments']['cause'][0]['text']
        effect = event['arguments']['effect'][0]['text']
        st = Activation(Agent(cause), Agent(effect))
        stmts.append(st)
    return stmts
Add initial mention processing code
Add initial mention processing code
Python
bsd-2-clause
sorgerlab/indra,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/indra,pvtodorov/indra,bgyori/indra,bgyori/indra,johnbachman/indra,pvtodorov/indra,pvtodorov/indra,johnbachman/belpy,sorgerlab/indra,sorgerlab/belpy,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra
Add initial mention processing code
import json

from indra.statements import Activation, Agent


def process_mentions(fname):
    with open(fname, 'r') as fh:
        jd = json.load(fh)
    mentions = jd['mentions']
    events = [m for m in mentions if m['type'] == 'EventMention']
    events = [e for e in events if len(e['arguments']) == 2]
    events = [e for e in events if 'cause' in e['arguments']]
    stmts = []
    for event in events:
        cause = event['arguments']['cause'][0]['text']
        effect = event['arguments']['effect'][0]['text']
        st = Activation(Agent(cause), Agent(effect))
        stmts.append(st)
    return stmts
<commit_before><commit_msg>Add initial mention processing code<commit_after>
import json

from indra.statements import Activation, Agent


def process_mentions(fname):
    with open(fname, 'r') as fh:
        jd = json.load(fh)
    mentions = jd['mentions']
    events = [m for m in mentions if m['type'] == 'EventMention']
    events = [e for e in events if len(e['arguments']) == 2]
    events = [e for e in events if 'cause' in e['arguments']]
    stmts = []
    for event in events:
        cause = event['arguments']['cause'][0]['text']
        effect = event['arguments']['effect'][0]['text']
        st = Activation(Agent(cause), Agent(effect))
        stmts.append(st)
    return stmts
Add initial mention processing code
import json

from indra.statements import Activation, Agent


def process_mentions(fname):
    with open(fname, 'r') as fh:
        jd = json.load(fh)
    mentions = jd['mentions']
    events = [m for m in mentions if m['type'] == 'EventMention']
    events = [e for e in events if len(e['arguments']) == 2]
    events = [e for e in events if 'cause' in e['arguments']]
    stmts = []
    for event in events:
        cause = event['arguments']['cause'][0]['text']
        effect = event['arguments']['effect'][0]['text']
        st = Activation(Agent(cause), Agent(effect))
        stmts.append(st)
    return stmts
<commit_before><commit_msg>Add initial mention processing code<commit_after>
import json

from indra.statements import Activation, Agent


def process_mentions(fname):
    with open(fname, 'r') as fh:
        jd = json.load(fh)
    mentions = jd['mentions']
    events = [m for m in mentions if m['type'] == 'EventMention']
    events = [e for e in events if len(e['arguments']) == 2]
    events = [e for e in events if 'cause' in e['arguments']]
    stmts = []
    for event in events:
        cause = event['arguments']['cause'][0]['text']
        effect = event['arguments']['effect'][0]['text']
        st = Activation(Agent(cause), Agent(effect))
        stmts.append(st)
    return stmts
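A hedged sketch of the input shape process_mentions() above expects, assuming it is importable; the mention structure and gene names here are invented for illustration, and the printed repr is approximate.

import json
import tempfile

# Minimal fabricated mentions document matching the fields the code reads.
mentions = {"mentions": [
    {"type": "EventMention",
     "arguments": {"cause": [{"text": "EGF"}],
                   "effect": [{"text": "ERK"}]}},
]}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as fh:
    json.dump(mentions, fh)
stmts = process_mentions(fh.name)
print(stmts)  # roughly: [Activation(EGF(), ERK())]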
534bfdd64d6e25f02897f15846c54d7e435b2406
jenkinsapi/utils/krb_requester.py
jenkinsapi/utils/krb_requester.py
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL


class KrbRequester(Requester):
    """
    A class which carries out HTTP requests with Kerberos/GSSAPI
    authentication.
    """

    def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
        """
        :param ssl_verify: flag indicating if server certificate in HTTPS
                           requests should be verified
        :param baseurl: Jenkins' base URL
        :param mutual_auth: type of mutual authentication, use one of
                            REQUIRED, OPTIONAL or DISABLED from
                            requests_kerberos package
        """
        args = {}
        if ssl_verify:
            args["ssl_verify"] = ssl_verify
        if baseurl:
            args["baseurl"] = baseurl
        super(KrbRequester, self).__init__(**args)
        self.mutual_auth = mutual_auth

    def get_request_dict(self, url, params, data, headers):
        req_dict = super(KrbRequester, self).get_request_dict(url=url,
                                                              params=params,
                                                              data=data,
                                                              headers=headers)
        if self.mutual_auth:
            auth = HTTPKerberosAuth(self.mutual_auth)
        else:
            auth = HTTPKerberosAuth()
        req_dict['auth'] = auth
        return req_dict
Add kerberos authentication requester using requests_kerberos
Add kerberos authentication requester using requests_kerberos
Python
mit
jduan/jenkinsapi,zaro0508/jenkinsapi,imsardine/jenkinsapi,domenkozar/jenkinsapi,mistermocha/jenkinsapi,imsardine/jenkinsapi,jduan/jenkinsapi,domenkozar/jenkinsapi,salimfadhley/jenkinsapi,JohnLZeller/jenkinsapi,JohnLZeller/jenkinsapi,salimfadhley/jenkinsapi,imsardine/jenkinsapi,zaro0508/jenkinsapi,mistermocha/jenkinsapi,mistermocha/jenkinsapi,aerickson/jenkinsapi,JohnLZeller/jenkinsapi,aerickson/jenkinsapi,zaro0508/jenkinsapi
Add kerberos authentication requester using requests_kerberos
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL


class KrbRequester(Requester):
    """
    A class which carries out HTTP requests with Kerberos/GSSAPI
    authentication.
    """

    def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
        """
        :param ssl_verify: flag indicating if server certificate in HTTPS
                           requests should be verified
        :param baseurl: Jenkins' base URL
        :param mutual_auth: type of mutual authentication, use one of
                            REQUIRED, OPTIONAL or DISABLED from
                            requests_kerberos package
        """
        args = {}
        if ssl_verify:
            args["ssl_verify"] = ssl_verify
        if baseurl:
            args["baseurl"] = baseurl
        super(KrbRequester, self).__init__(**args)
        self.mutual_auth = mutual_auth

    def get_request_dict(self, url, params, data, headers):
        req_dict = super(KrbRequester, self).get_request_dict(url=url,
                                                              params=params,
                                                              data=data,
                                                              headers=headers)
        if self.mutual_auth:
            auth = HTTPKerberosAuth(self.mutual_auth)
        else:
            auth = HTTPKerberosAuth()
        req_dict['auth'] = auth
        return req_dict
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL


class KrbRequester(Requester):
    """
    A class which carries out HTTP requests with Kerberos/GSSAPI
    authentication.
    """

    def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
        """
        :param ssl_verify: flag indicating if server certificate in HTTPS
                           requests should be verified
        :param baseurl: Jenkins' base URL
        :param mutual_auth: type of mutual authentication, use one of
                            REQUIRED, OPTIONAL or DISABLED from
                            requests_kerberos package
        """
        args = {}
        if ssl_verify:
            args["ssl_verify"] = ssl_verify
        if baseurl:
            args["baseurl"] = baseurl
        super(KrbRequester, self).__init__(**args)
        self.mutual_auth = mutual_auth

    def get_request_dict(self, url, params, data, headers):
        req_dict = super(KrbRequester, self).get_request_dict(url=url,
                                                              params=params,
                                                              data=data,
                                                              headers=headers)
        if self.mutual_auth:
            auth = HTTPKerberosAuth(self.mutual_auth)
        else:
            auth = HTTPKerberosAuth()
        req_dict['auth'] = auth
        return req_dict
Add kerberos authentication requester using requests_kerberos
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL


class KrbRequester(Requester):
    """
    A class which carries out HTTP requests with Kerberos/GSSAPI
    authentication.
    """

    def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
        """
        :param ssl_verify: flag indicating if server certificate in HTTPS
                           requests should be verified
        :param baseurl: Jenkins' base URL
        :param mutual_auth: type of mutual authentication, use one of
                            REQUIRED, OPTIONAL or DISABLED from
                            requests_kerberos package
        """
        args = {}
        if ssl_verify:
            args["ssl_verify"] = ssl_verify
        if baseurl:
            args["baseurl"] = baseurl
        super(KrbRequester, self).__init__(**args)
        self.mutual_auth = mutual_auth

    def get_request_dict(self, url, params, data, headers):
        req_dict = super(KrbRequester, self).get_request_dict(url=url,
                                                              params=params,
                                                              data=data,
                                                              headers=headers)
        if self.mutual_auth:
            auth = HTTPKerberosAuth(self.mutual_auth)
        else:
            auth = HTTPKerberosAuth()
        req_dict['auth'] = auth
        return req_dict
<commit_before><commit_msg>Add kerberos authentication requester using requests_kerberos<commit_after>
from jenkinsapi.utils.requester import Requester
from requests_kerberos import HTTPKerberosAuth, OPTIONAL


class KrbRequester(Requester):
    """
    A class which carries out HTTP requests with Kerberos/GSSAPI
    authentication.
    """

    def __init__(self, ssl_verify=None, baseurl=None, mutual_auth=OPTIONAL):
        """
        :param ssl_verify: flag indicating if server certificate in HTTPS
                           requests should be verified
        :param baseurl: Jenkins' base URL
        :param mutual_auth: type of mutual authentication, use one of
                            REQUIRED, OPTIONAL or DISABLED from
                            requests_kerberos package
        """
        args = {}
        if ssl_verify:
            args["ssl_verify"] = ssl_verify
        if baseurl:
            args["baseurl"] = baseurl
        super(KrbRequester, self).__init__(**args)
        self.mutual_auth = mutual_auth

    def get_request_dict(self, url, params, data, headers):
        req_dict = super(KrbRequester, self).get_request_dict(url=url,
                                                              params=params,
                                                              data=data,
                                                              headers=headers)
        if self.mutual_auth:
            auth = HTTPKerberosAuth(self.mutual_auth)
        else:
            auth = HTTPKerberosAuth()
        req_dict['auth'] = auth
        return req_dict
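A hedged usage sketch for the requester above: it assumes a valid Kerberos ticket is already available (e.g. via kinit), and the Jenkins URL is a placeholder. jenkinsapi's Jenkins constructor accepts a custom requester via its requester keyword.

from jenkinsapi.jenkins import Jenkins
from requests_kerberos import REQUIRED

# Build a Kerberos-authenticating requester and hand it to the client.
requester = KrbRequester(baseurl="https://jenkins.example.com",
                         mutual_auth=REQUIRED)
server = Jenkins("https://jenkins.example.com", requester=requester)
print(server.version)  # simple connectivity check over GSSAPI-authenticated HTTP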
7fa4f8d6fcef81ec91b6cdff70880ad053c887bc
scripts/newActivity.py
scripts/newActivity.py
#!/usr/bin/env python

from datetime import datetime
from pymongo import MongoClient
import re
from subprocess import call
import sys

# minutes
window = 30

if len(sys.argv) != 2:
    print 'Usage: %s <logfile>' % sys.argv[0]
    sys.exit(1)

now = datetime.now()

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')

active = set()

with open(sys.argv[1]) as input:
    for line in input:
        hit = logformat.match(line)
        if hit:
            timestamp = datetime.strptime(hit.group(1), '%Y-%m-%d %H:%M:%S')
            if abs((now - timestamp).seconds) < 60*window:  # 30 minute window for now
                active.add(hit.group(2))
                #print 'Activity at %s for %s: %s' % (hit.group(1), hit.group(2), hit.group(0))

if len(active) > 0:
    print 'Active accounts with new columns in the last %d minutes:' % window
    conn = MongoClient('localhost', 27017)
    accounts = {}
    for acct in conn['accounts_v1_2_2']['accounts'].find():
        accounts[acct['accountId']] = acct['email']
    for acct in sorted(list(active)):
        print '  %s (%s)' % (acct, accounts[acct])
    conn.close()
Add in script to track new account activity
Add in script to track new account activity
Python
agpl-3.0
precog/platform,precog/platform,precog/platform,precog/platform
Add in script to track new account activity
#!/usr/bin/env python

from datetime import datetime
from pymongo import MongoClient
import re
from subprocess import call
import sys

# minutes
window = 30

if len(sys.argv) != 2:
    print 'Usage: %s <logfile>' % sys.argv[0]
    sys.exit(1)

now = datetime.now()

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')

active = set()

with open(sys.argv[1]) as input:
    for line in input:
        hit = logformat.match(line)
        if hit:
            timestamp = datetime.strptime(hit.group(1), '%Y-%m-%d %H:%M:%S')
            if abs((now - timestamp).seconds) < 60*window:  # 30 minute window for now
                active.add(hit.group(2))
                #print 'Activity at %s for %s: %s' % (hit.group(1), hit.group(2), hit.group(0))

if len(active) > 0:
    print 'Active accounts with new columns in the last %d minutes:' % window
    conn = MongoClient('localhost', 27017)
    accounts = {}
    for acct in conn['accounts_v1_2_2']['accounts'].find():
        accounts[acct['accountId']] = acct['email']
    for acct in sorted(list(active)):
        print '  %s (%s)' % (acct, accounts[acct])
    conn.close()
<commit_before><commit_msg>Add in script to track new account activity<commit_after>
#!/usr/bin/env python

from datetime import datetime
from pymongo import MongoClient
import re
from subprocess import call
import sys

# minutes
window = 30

if len(sys.argv) != 2:
    print 'Usage: %s <logfile>' % sys.argv[0]
    sys.exit(1)

now = datetime.now()

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')

active = set()

with open(sys.argv[1]) as input:
    for line in input:
        hit = logformat.match(line)
        if hit:
            timestamp = datetime.strptime(hit.group(1), '%Y-%m-%d %H:%M:%S')
            if abs((now - timestamp).seconds) < 60*window:  # 30 minute window for now
                active.add(hit.group(2))
                #print 'Activity at %s for %s: %s' % (hit.group(1), hit.group(2), hit.group(0))

if len(active) > 0:
    print 'Active accounts with new columns in the last %d minutes:' % window
    conn = MongoClient('localhost', 27017)
    accounts = {}
    for acct in conn['accounts_v1_2_2']['accounts'].find():
        accounts[acct['accountId']] = acct['email']
    for acct in sorted(list(active)):
        print '  %s (%s)' % (acct, accounts[acct])
    conn.close()
Add in script to track new account activity
#!/usr/bin/env python

from datetime import datetime
from pymongo import MongoClient
import re
from subprocess import call
import sys

# minutes
window = 30

if len(sys.argv) != 2:
    print 'Usage: %s <logfile>' % sys.argv[0]
    sys.exit(1)

now = datetime.now()

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')

active = set()

with open(sys.argv[1]) as input:
    for line in input:
        hit = logformat.match(line)
        if hit:
            timestamp = datetime.strptime(hit.group(1), '%Y-%m-%d %H:%M:%S')
            if abs((now - timestamp).seconds) < 60*window:  # 30 minute window for now
                active.add(hit.group(2))
                #print 'Activity at %s for %s: %s' % (hit.group(1), hit.group(2), hit.group(0))

if len(active) > 0:
    print 'Active accounts with new columns in the last %d minutes:' % window
    conn = MongoClient('localhost', 27017)
    accounts = {}
    for acct in conn['accounts_v1_2_2']['accounts'].find():
        accounts[acct['accountId']] = acct['email']
    for acct in sorted(list(active)):
        print '  %s (%s)' % (acct, accounts[acct])
    conn.close()
<commit_before><commit_msg>Add in script to track new account activity<commit_after>
#!/usr/bin/env python

from datetime import datetime
from pymongo import MongoClient
import re
from subprocess import call
import sys

# minutes
window = 30

if len(sys.argv) != 2:
    print 'Usage: %s <logfile>' % sys.argv[0]
    sys.exit(1)

now = datetime.now()

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')

active = set()

with open(sys.argv[1]) as input:
    for line in input:
        hit = logformat.match(line)
        if hit:
            timestamp = datetime.strptime(hit.group(1), '%Y-%m-%d %H:%M:%S')
            if abs((now - timestamp).seconds) < 60*window:  # 30 minute window for now
                active.add(hit.group(2))
                #print 'Activity at %s for %s: %s' % (hit.group(1), hit.group(2), hit.group(0))

if len(active) > 0:
    print 'Active accounts with new columns in the last %d minutes:' % window
    conn = MongoClient('localhost', 27017)
    accounts = {}
    for acct in conn['accounts_v1_2_2']['accounts'].find():
        accounts[acct['accountId']] = acct['email']
    for acct in sorted(list(active)):
        print '  %s (%s)' % (acct, accounts[acct])
    conn.close()
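A quick, self-contained check of the log regex above against a fabricated line; the account id and surrounding message text are invented to match the expected shape.

import re

logformat = re.compile('(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d).*Created new projection.*?ColumnDescriptor\(/(\d{10})/.*')
sample = "2013-04-01 12:34:56 INFO Created new projection with ColumnDescriptor(/0000012345/data)"
hit = logformat.match(sample)
print(hit.group(1), hit.group(2))  # 2013-04-01 12:34:56 0000012345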
26a9d49622e54fa20d9875ca1329ca304496030f
molly/favourites/utils.py
molly/favourites/utils.py
""" Utilities to help handling favourites """ from django.http import Http404 from django.core.urlresolvers import resolve def get_favourites(request): """ Returns a list of favourites, the list is tuples of (title, URL) """ fs = [] for url in (request.session['favourites'] if 'favourites' in request.session else []): # Remove broken links from the favourites try: view, args, kwargs = resolve(url) breadcrumb = view.breadcrumb(request, view.initial_context(request, *args, **kwargs), *args, **kwargs) fs.append((breadcrumb[4], url)) except Http404: request.session['favourites'].remove(url) request.session.modified = True return fs
Add utility functions to get a list of favourites
Add utility functions to get a list of favourites
Python
apache-2.0
mollyproject/mollyproject,mollyproject/mollyproject,mollyproject/mollyproject
Add utility functions to get a list of favourites
""" Utilities to help handling favourites """ from django.http import Http404 from django.core.urlresolvers import resolve def get_favourites(request): """ Returns a list of favourites, the list is tuples of (title, URL) """ fs = [] for url in (request.session['favourites'] if 'favourites' in request.session else []): # Remove broken links from the favourites try: view, args, kwargs = resolve(url) breadcrumb = view.breadcrumb(request, view.initial_context(request, *args, **kwargs), *args, **kwargs) fs.append((breadcrumb[4], url)) except Http404: request.session['favourites'].remove(url) request.session.modified = True return fs
<commit_before><commit_msg>Add utility functions to get a list of favourites<commit_after>
""" Utilities to help handling favourites """ from django.http import Http404 from django.core.urlresolvers import resolve def get_favourites(request): """ Returns a list of favourites, the list is tuples of (title, URL) """ fs = [] for url in (request.session['favourites'] if 'favourites' in request.session else []): # Remove broken links from the favourites try: view, args, kwargs = resolve(url) breadcrumb = view.breadcrumb(request, view.initial_context(request, *args, **kwargs), *args, **kwargs) fs.append((breadcrumb[4], url)) except Http404: request.session['favourites'].remove(url) request.session.modified = True return fs
Add utility functions to get a list of favourites""" Utilities to help handling favourites """ from django.http import Http404 from django.core.urlresolvers import resolve def get_favourites(request): """ Returns a list of favourites, the list is tuples of (title, URL) """ fs = [] for url in (request.session['favourites'] if 'favourites' in request.session else []): # Remove broken links from the favourites try: view, args, kwargs = resolve(url) breadcrumb = view.breadcrumb(request, view.initial_context(request, *args, **kwargs), *args, **kwargs) fs.append((breadcrumb[4], url)) except Http404: request.session['favourites'].remove(url) request.session.modified = True return fs
<commit_before><commit_msg>Add utility functions to get a list of favourites<commit_after>""" Utilities to help handling favourites """ from django.http import Http404 from django.core.urlresolvers import resolve def get_favourites(request): """ Returns a list of favourites, the list is tuples of (title, URL) """ fs = [] for url in (request.session['favourites'] if 'favourites' in request.session else []): # Remove broken links from the favourites try: view, args, kwargs = resolve(url) breadcrumb = view.breadcrumb(request, view.initial_context(request, *args, **kwargs), *args, **kwargs) fs.append((breadcrumb[4], url)) except Http404: request.session['favourites'].remove(url) request.session.modified = True return fs
b9485e5510c92d88451c272c327d4077cde39d96
src/test_numbers.py
src/test_numbers.py
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
    assert_false, assert_raises, assert_is_instance
from subprocess import call
from ksatools import loadfile, calcmedian, calccumsum
import numpy as np
from numpy import array


def test_01():
    data = loadfile("../test/test01.21")
    cn, c1, yd, yo, zd, zo = calccumsum(data)
    pass
Add skeleton for statistics-sanity check tests
Add skeleton for statistics-sanity check tests
Python
bsd-2-clause
wltrimbl/kmerspectrumanalyzer,MG-RAST/kmerspectrumanalyzer,MG-RAST/kmerspectrumanalyzer,wltrimbl/kmerspectrumanalyzer,wltrimbl/kmerspectrumanalyzer,MG-RAST/kmerspectrumanalyzer
Add skeleton for statistics-sanity check tests
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
    assert_false, assert_raises, assert_is_instance
from subprocess import call
from ksatools import loadfile, calcmedian, calccumsum
import numpy as np
from numpy import array


def test_01():
    data = loadfile("../test/test01.21")
    cn, c1, yd, yo, zd, zo = calccumsum(data)
    pass
<commit_before><commit_msg>Add skeleton for statistics-sanity check tests<commit_after>
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
    assert_false, assert_raises, assert_is_instance
from subprocess import call
from ksatools import loadfile, calcmedian, calccumsum
import numpy as np
from numpy import array


def test_01():
    data = loadfile("../test/test01.21")
    cn, c1, yd, yo, zd, zo = calccumsum(data)
    pass
Add skeleton for statistics-sanity check tests
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
    assert_false, assert_raises, assert_is_instance
from subprocess import call
from ksatools import loadfile, calcmedian, calccumsum
import numpy as np
from numpy import array


def test_01():
    data = loadfile("../test/test01.21")
    cn, c1, yd, yo, zd, zo = calccumsum(data)
    pass
<commit_before><commit_msg>Add skeleton for statistics-sanity check tests<commit_after>
from nose.tools import assert_equal, assert_almost_equal, assert_true, \
    assert_false, assert_raises, assert_is_instance
from subprocess import call
from ksatools import loadfile, calcmedian, calccumsum
import numpy as np
from numpy import array


def test_01():
    data = loadfile("../test/test01.21")
    cn, c1, yd, yo, zd, zo = calccumsum(data)
    pass
3a09a2ed29c05eaa66a010405f9e609951bf5238
submodules-to-glockfile.py
submodules-to-glockfile.py
#!/usr/bin/python

import re
import subprocess

def main():
    source = open(".gitmodules").read()
    paths = re.findall(r"path = (.*)", source)
    for path in paths:
        print "{repo} {sha}".format(
            repo = path[7:],
            sha = path_sha1(path)
        )

def path_sha1(path):
    cmd = "cd {} && git rev-parse HEAD".format(path)
    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    sha1 = sp.stdout.read()[:-1]
    return sha1

if __name__ == "__main__":
    main()
Add a script that prints submodules info in Glockfile compatible format
Add a script that prints submodules info in Glockfile compatible format
Python
mit
localhots/satan,localhots/satan,localhots/satan,localhots/satan
Add a script that prints submodules info in Glockfile compatible format
#!/usr/bin/python

import re
import subprocess

def main():
    source = open(".gitmodules").read()
    paths = re.findall(r"path = (.*)", source)
    for path in paths:
        print "{repo} {sha}".format(
            repo = path[7:],
            sha = path_sha1(path)
        )

def path_sha1(path):
    cmd = "cd {} && git rev-parse HEAD".format(path)
    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    sha1 = sp.stdout.read()[:-1]
    return sha1

if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add a script that prints submodules info in Glockfile compatible format<commit_after>
#!/usr/bin/python

import re
import subprocess

def main():
    source = open(".gitmodules").read()
    paths = re.findall(r"path = (.*)", source)
    for path in paths:
        print "{repo} {sha}".format(
            repo = path[7:],
            sha = path_sha1(path)
        )

def path_sha1(path):
    cmd = "cd {} && git rev-parse HEAD".format(path)
    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    sha1 = sp.stdout.read()[:-1]
    return sha1

if __name__ == "__main__":
    main()
Add a script that prints submodules info in Glockfile compatible format
#!/usr/bin/python

import re
import subprocess

def main():
    source = open(".gitmodules").read()
    paths = re.findall(r"path = (.*)", source)
    for path in paths:
        print "{repo} {sha}".format(
            repo = path[7:],
            sha = path_sha1(path)
        )

def path_sha1(path):
    cmd = "cd {} && git rev-parse HEAD".format(path)
    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    sha1 = sp.stdout.read()[:-1]
    return sha1

if __name__ == "__main__":
    main()
<commit_before><commit_msg>Add a script that prints submodules info in Glockfile compatible format<commit_after>
#!/usr/bin/python

import re
import subprocess

def main():
    source = open(".gitmodules").read()
    paths = re.findall(r"path = (.*)", source)
    for path in paths:
        print "{repo} {sha}".format(
            repo = path[7:],
            sha = path_sha1(path)
        )

def path_sha1(path):
    cmd = "cd {} && git rev-parse HEAD".format(path)
    sp = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    sha1 = sp.stdout.read()[:-1]
    return sha1

if __name__ == "__main__":
    main()
3b30c40dfeccae5fb22a69369a2b09b8f3a5e9cd
iterative_binary_search.py
iterative_binary_search.py
def iterative_binary_search(array, item):
    low = 0
    high = len(array) - 1

    while(low<=high):
        mid = (low+high)//2
        if (item == array[mid]):
            return mid
        elif item < array[mid]:
            high = mid - 1
        elif item > array[mid]:
            low = mid + 1
    return -1

print ( iterative_binary_search([1,4,5,7], 5))   #2
print ( iterative_binary_search([1,4,5,7], 10))  #-1
Add iterative binary search algorithm implementation
Add iterative binary search algorithm implementation
Python
mit
arafat-al-mahmud/algorithms-python
Add iterative binary search algorithm implementation
def iterative_binary_search(array, item):
    low = 0
    high = len(array) - 1

    while(low<=high):
        mid = (low+high)//2
        if (item == array[mid]):
            return mid
        elif item < array[mid]:
            high = mid - 1
        elif item > array[mid]:
            low = mid + 1
    return -1

print ( iterative_binary_search([1,4,5,7], 5))   #2
print ( iterative_binary_search([1,4,5,7], 10))  #-1
<commit_before><commit_msg>Add iterative binary search algorithm implementation<commit_after>
def iterative_binary_search(array, item):
    low = 0
    high = len(array) - 1

    while(low<=high):
        mid = (low+high)//2
        if (item == array[mid]):
            return mid
        elif item < array[mid]:
            high = mid - 1
        elif item > array[mid]:
            low = mid + 1
    return -1

print ( iterative_binary_search([1,4,5,7], 5))   #2
print ( iterative_binary_search([1,4,5,7], 10))  #-1
Add iterative binary search algorithm implementation
def iterative_binary_search(array, item):
    low = 0
    high = len(array) - 1

    while(low<=high):
        mid = (low+high)//2
        if (item == array[mid]):
            return mid
        elif item < array[mid]:
            high = mid - 1
        elif item > array[mid]:
            low = mid + 1
    return -1

print ( iterative_binary_search([1,4,5,7], 5))   #2
print ( iterative_binary_search([1,4,5,7], 10))  #-1
<commit_before><commit_msg>Add iterative binary search algorithm implementation<commit_after>
def iterative_binary_search(array, item):
    low = 0
    high = len(array) - 1

    while(low<=high):
        mid = (low+high)//2
        if (item == array[mid]):
            return mid
        elif item < array[mid]:
            high = mid - 1
        elif item > array[mid]:
            low = mid + 1
    return -1

print ( iterative_binary_search([1,4,5,7], 5))   #2
print ( iterative_binary_search([1,4,5,7], 10))  #-1
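A small sanity check for the function above (assumed to be defined or importable in scope), cross-checking against the standard library's bisect on sorted, duplicate-free inputs.

import bisect

for arr in ([], [3], [1, 4, 5, 7], list(range(0, 50, 3))):
    for item in (-1, 0, 3, 5, 7, 48, 99):
        idx = iterative_binary_search(arr, item)
        ref = bisect.bisect_left(arr, item)
        # For unique-element arrays, a found index must equal bisect_left's.
        expected = ref if ref < len(arr) and arr[ref] == item else -1
        assert idx == expected, (arr, item, idx, expected)
print("binary search agrees with bisect on the samples")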
628f41751f247b7bc7e4a6422d33355bd8913bca
resolwe_bio/migrations/0013_star_index.py
resolwe_bio/migrations/0013_star_index.py
from django.db import migrations

from resolwe.flow.migration_ops import ResolweProcessChangeType


class Migration(migrations.Migration):
    """
    Change the ``alignment-star-index`` process type.
    """

    dependencies = [
        ('resolwe_bio', '0012_full_text_search'),
    ]

    operations = [
        ResolweProcessChangeType(
            process='alignment-star-index',
            new_type='data:index:star:',
        ),
    ]
Migrate STAR index process type
Migrate STAR index process type
Python
apache-2.0
genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio,genialis/resolwe-bio
Migrate STAR index process type
from django.db import migrations

from resolwe.flow.migration_ops import ResolweProcessChangeType


class Migration(migrations.Migration):
    """
    Change the ``alignment-star-index`` process type.
    """

    dependencies = [
        ('resolwe_bio', '0012_full_text_search'),
    ]

    operations = [
        ResolweProcessChangeType(
            process='alignment-star-index',
            new_type='data:index:star:',
        ),
    ]
<commit_before><commit_msg>Migrate STAR index process type<commit_after>
from django.db import migrations

from resolwe.flow.migration_ops import ResolweProcessChangeType


class Migration(migrations.Migration):
    """
    Change the ``alignment-star-index`` process type.
    """

    dependencies = [
        ('resolwe_bio', '0012_full_text_search'),
    ]

    operations = [
        ResolweProcessChangeType(
            process='alignment-star-index',
            new_type='data:index:star:',
        ),
    ]
Migrate STAR index process type
from django.db import migrations

from resolwe.flow.migration_ops import ResolweProcessChangeType


class Migration(migrations.Migration):
    """
    Change the ``alignment-star-index`` process type.
    """

    dependencies = [
        ('resolwe_bio', '0012_full_text_search'),
    ]

    operations = [
        ResolweProcessChangeType(
            process='alignment-star-index',
            new_type='data:index:star:',
        ),
    ]
<commit_before><commit_msg>Migrate STAR index process type<commit_after>
from django.db import migrations

from resolwe.flow.migration_ops import ResolweProcessChangeType


class Migration(migrations.Migration):
    """
    Change the ``alignment-star-index`` process type.
    """

    dependencies = [
        ('resolwe_bio', '0012_full_text_search'),
    ]

    operations = [
        ResolweProcessChangeType(
            process='alignment-star-index',
            new_type='data:index:star:',
        ),
    ]
0f77b509a561a90a898ce2668a02a39c6dd10486
app/tests/cases_tests/test_permissions.py
app/tests/cases_tests/test_permissions.py
import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group

from grandchallenge.cases.permissions import ImagePermission
from tests.factories import UserFactory, ImageFactory


class Request:
    """
    Mock request class containing only user
    """

    def __init__(self, user):
        self.user = user


@pytest.mark.django_db
class TestImagePermission:
    @pytest.mark.parametrize(
        "user,access",
        [
            (AnonymousUser, False),
            (UserFactory, False),
            (UserFactory, True),
            ("retina_grader_no_access", False),
            ("retina_admin_no_access", False),
            ("retina_grader", True),
            ("retina_admin", True),
        ],
    )
    def test_permissions(self, user, access):
        image = ImageFactory()
        if isinstance(user, str):
            group_name = (
                settings.RETINA_ADMINS_GROUP_NAME
                if "admin" in user
                else settings.RETINA_GRADERS_GROUP_NAME
            )
            if "no_access" not in user:
                image.permit_viewing_by_retina_users()
            user = UserFactory()
            grader_group, group_created = Group.objects.get_or_create(
                name=group_name
            )
            grader_group.user_set.add(user)
        elif user == AnonymousUser:
            user = AnonymousUser()
        else:
            user = user(is_staff=access)
        request = Request(user=user)
        permission = ImagePermission()
        assert permission.has_object_permission(request, {}, image) == access
Add tests for permission class
Add tests for permission class
Python
apache-2.0
comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django,comic/comic-django
Add tests for permission class
import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group

from grandchallenge.cases.permissions import ImagePermission
from tests.factories import UserFactory, ImageFactory


class Request:
    """
    Mock request class containing only user
    """

    def __init__(self, user):
        self.user = user


@pytest.mark.django_db
class TestImagePermission:
    @pytest.mark.parametrize(
        "user,access",
        [
            (AnonymousUser, False),
            (UserFactory, False),
            (UserFactory, True),
            ("retina_grader_no_access", False),
            ("retina_admin_no_access", False),
            ("retina_grader", True),
            ("retina_admin", True),
        ],
    )
    def test_permissions(self, user, access):
        image = ImageFactory()
        if isinstance(user, str):
            group_name = (
                settings.RETINA_ADMINS_GROUP_NAME
                if "admin" in user
                else settings.RETINA_GRADERS_GROUP_NAME
            )
            if "no_access" not in user:
                image.permit_viewing_by_retina_users()
            user = UserFactory()
            grader_group, group_created = Group.objects.get_or_create(
                name=group_name
            )
            grader_group.user_set.add(user)
        elif user == AnonymousUser:
            user = AnonymousUser()
        else:
            user = user(is_staff=access)
        request = Request(user=user)
        permission = ImagePermission()
        assert permission.has_object_permission(request, {}, image) == access
<commit_before><commit_msg>Add tests for permission class<commit_after>
import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group

from grandchallenge.cases.permissions import ImagePermission
from tests.factories import UserFactory, ImageFactory


class Request:
    """
    Mock request class containing only user
    """

    def __init__(self, user):
        self.user = user


@pytest.mark.django_db
class TestImagePermission:
    @pytest.mark.parametrize(
        "user,access",
        [
            (AnonymousUser, False),
            (UserFactory, False),
            (UserFactory, True),
            ("retina_grader_no_access", False),
            ("retina_admin_no_access", False),
            ("retina_grader", True),
            ("retina_admin", True),
        ],
    )
    def test_permissions(self, user, access):
        image = ImageFactory()
        if isinstance(user, str):
            group_name = (
                settings.RETINA_ADMINS_GROUP_NAME
                if "admin" in user
                else settings.RETINA_GRADERS_GROUP_NAME
            )
            if "no_access" not in user:
                image.permit_viewing_by_retina_users()
            user = UserFactory()
            grader_group, group_created = Group.objects.get_or_create(
                name=group_name
            )
            grader_group.user_set.add(user)
        elif user == AnonymousUser:
            user = AnonymousUser()
        else:
            user = user(is_staff=access)
        request = Request(user=user)
        permission = ImagePermission()
        assert permission.has_object_permission(request, {}, image) == access
Add tests for permission class
import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group

from grandchallenge.cases.permissions import ImagePermission
from tests.factories import UserFactory, ImageFactory


class Request:
    """
    Mock request class containing only user
    """

    def __init__(self, user):
        self.user = user


@pytest.mark.django_db
class TestImagePermission:
    @pytest.mark.parametrize(
        "user,access",
        [
            (AnonymousUser, False),
            (UserFactory, False),
            (UserFactory, True),
            ("retina_grader_no_access", False),
            ("retina_admin_no_access", False),
            ("retina_grader", True),
            ("retina_admin", True),
        ],
    )
    def test_permissions(self, user, access):
        image = ImageFactory()
        if isinstance(user, str):
            group_name = (
                settings.RETINA_ADMINS_GROUP_NAME
                if "admin" in user
                else settings.RETINA_GRADERS_GROUP_NAME
            )
            if "no_access" not in user:
                image.permit_viewing_by_retina_users()
            user = UserFactory()
            grader_group, group_created = Group.objects.get_or_create(
                name=group_name
            )
            grader_group.user_set.add(user)
        elif user == AnonymousUser:
            user = AnonymousUser()
        else:
            user = user(is_staff=access)
        request = Request(user=user)
        permission = ImagePermission()
        assert permission.has_object_permission(request, {}, image) == access
<commit_before><commit_msg>Add tests for permission class<commit_after>import pytest
from django.conf import settings
from django.contrib.auth.models import AnonymousUser, Group

from grandchallenge.cases.permissions import ImagePermission
from tests.factories import UserFactory, ImageFactory


class Request:
    """
    Mock request class containing only user
    """

    def __init__(self, user):
        self.user = user


@pytest.mark.django_db
class TestImagePermission:
    @pytest.mark.parametrize(
        "user,access",
        [
            (AnonymousUser, False),
            (UserFactory, False),
            (UserFactory, True),
            ("retina_grader_no_access", False),
            ("retina_admin_no_access", False),
            ("retina_grader", True),
            ("retina_admin", True),
        ],
    )
    def test_permissions(self, user, access):
        image = ImageFactory()
        if isinstance(user, str):
            group_name = (
                settings.RETINA_ADMINS_GROUP_NAME
                if "admin" in user
                else settings.RETINA_GRADERS_GROUP_NAME
            )
            if "no_access" not in user:
                image.permit_viewing_by_retina_users()
            user = UserFactory()
            grader_group, group_created = Group.objects.get_or_create(
                name=group_name
            )
            grader_group.user_set.add(user)
        elif user == AnonymousUser:
            user = AnonymousUser()
        else:
            user = user(is_staff=access)
        request = Request(user=user)
        permission = ImagePermission()
        assert permission.has_object_permission(request, {}, image) == access