| commit (stringlengths 40-40) | old_file (stringlengths 4-118) | new_file (stringlengths 4-118) | old_contents (stringlengths 0-2.94k) | new_contents (stringlengths 1-4.43k) | subject (stringlengths 15-444) | message (stringlengths 16-3.45k) | lang (stringclasses, 1 value) | license (stringclasses, 13 values) | repos (stringlengths 5-43.2k) | prompt (stringlengths 17-4.58k) | response (stringlengths 1-4.43k) | prompt_tagged (stringlengths 58-4.62k) | response_tagged (stringlengths 1-4.43k) | text (stringlengths 132-7.29k) | text_tagged (stringlengths 173-7.33k) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
02181601597e203777412b9377af47525eee77f4
|
custom/enikshay/management/commands/update_enikshay_custom_data.py
|
custom/enikshay/management/commands/update_enikshay_custom_data.py
|
from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition, CustomDataField
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
# pcp -> MBBS
# pac -> AYUSH/other
# plc -> Private Lab
# pcc -> pharmacy / chemist
LOCATION_FIELDS = [
# (slug, label, choices)
('private_sector_org_id', "Private Sector Org ID", []),
('suborganization', "Suborganization", ["MGK", "Alert"]),
]
USER_FIELDS = [
('tb_corner', "TB Corner", ["Yes", "No"]),
('mbbs_qualification', "MBBS Qualification", ["MBBS", "DTCD", "MD - Chest Physician",
"MD - Medicine", "MS", "DM"]),
('ayush_qualification', "AYUSH Qualification", ["BAMS", "BHMS", "BUMS", "DAMS", "DHMS", "ASHA",
"ANM", "GNM", "LCEH", "NGO", "Others", "None"]),
('professional_org_membership', "Professional Org Membership", ["IMA", "WMA", "AMA", "AAFP",
"Others", "None"]),
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def show(self, definition):
for field in definition.fields:
print " ", field.slug
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.user_data = CustomDataFieldsDefinition.get_or_create(
domain, UserFieldsView.field_type)
self.location_data = CustomDataFieldsDefinition.get_or_create(
domain, LocationFieldsView.field_type)
print "\nOLD:"
self.show(self.user_data)
self.update_definition(self.user_data, USER_FIELDS)
print "\nNEW:"
self.show(self.user_data)
if self.confirm():
self.user_data.save()
print "\nOLD:"
self.show(self.location_data)
self.update_definition(self.location_data, LOCATION_FIELDS)
print "\nNEW:"
self.show(self.location_data)
if self.confirm():
self.location_data.save()
def update_definition(self, definition, fields_spec):
existing = {field.slug for field in definition.fields}
for field in self.get_fields(fields_spec):
if field.slug not in existing:
definition.fields.append(field)
def get_fields(self, spec):
return [
CustomDataField(
slug=slug,
is_required=False,
label=label,
choices=choices,
)
for slug, label, choices in spec
]
|
Add mgmt command to auto-add new fields
|
Add mgmt command to auto-add new fields
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Add mgmt command to auto-add new fields
|
from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition, CustomDataField
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
# pcp -> MBBS
# pac -> AYUSH/other
# plc -> Private Lab
# pcc -> pharmacy / chemist
LOCATION_FIELDS = [
# (slug, label, choices)
('private_sector_org_id', "Private Sector Org ID", []),
('suborganization', "Suborganization", ["MGK", "Alert"]),
]
USER_FIELDS = [
('tb_corner', "TB Corner", ["Yes", "No"]),
('mbbs_qualification', "MBBS Qualification", ["MBBS", "DTCD", "MD - Chest Physician",
"MD - Medicine", "MS", "DM"]),
('ayush_qualification', "AYUSH Qualification", ["BAMS", "BHMS", "BUMS", "DAMS", "DHMS", "ASHA",
"ANM", "GNM", "LCEH", "NGO", "Others", "None"]),
('professional_org_membership', "Professional Org Membership", ["IMA", "WMA", "AMA", "AAFP",
"Others", "None"]),
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def show(self, definition):
for field in definition.fields:
print " ", field.slug
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.user_data = CustomDataFieldsDefinition.get_or_create(
domain, UserFieldsView.field_type)
self.location_data = CustomDataFieldsDefinition.get_or_create(
domain, LocationFieldsView.field_type)
print "\nOLD:"
self.show(self.user_data)
self.update_definition(self.user_data, USER_FIELDS)
print "\nNEW:"
self.show(self.user_data)
if self.confirm():
self.user_data.save()
print "\nOLD:"
self.show(self.location_data)
self.update_definition(self.location_data, LOCATION_FIELDS)
print "\nNEW:"
self.show(self.location_data)
if self.confirm():
self.location_data.save()
def update_definition(self, definition, fields_spec):
existing = {field.slug for field in definition.fields}
for field in self.get_fields(fields_spec):
if field.slug not in existing:
definition.fields.append(field)
def get_fields(self, spec):
return [
CustomDataField(
slug=slug,
is_required=False,
label=label,
choices=choices,
)
for slug, label, choices in spec
]
|
<commit_before><commit_msg>Add mgmt command to auto-add new fields<commit_after>
|
from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition, CustomDataField
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
# pcp -> MBBS
# pac -> AYUSH/other
# plc -> Private Lab
# pcc -> pharmacy / chemist
LOCATION_FIELDS = [
# (slug, label, choices)
('private_sector_org_id', "Private Sector Org ID", []),
('suborganization', "Suborganization", ["MGK", "Alert"]),
]
USER_FIELDS = [
('tb_corner', "TB Corner", ["Yes", "No"]),
('mbbs_qualification', "MBBS Qualification", ["MBBS", "DTCD", "MD - Chest Physician",
"MD - Medicine", "MS", "DM"]),
('ayush_qualification', "AYUSH Qualification", ["BAMS", "BHMS", "BUMS", "DAMS", "DHMS", "ASHA",
"ANM", "GNM", "LCEH", "NGO", "Others", "None"]),
('professional_org_membership', "Professional Org Membership", ["IMA", "WMA", "AMA", "AAFP",
"Others", "None"]),
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def show(self, definition):
for field in definition.fields:
print " ", field.slug
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.user_data = CustomDataFieldsDefinition.get_or_create(
domain, UserFieldsView.field_type)
self.location_data = CustomDataFieldsDefinition.get_or_create(
domain, LocationFieldsView.field_type)
print "\nOLD:"
self.show(self.user_data)
self.update_definition(self.user_data, USER_FIELDS)
print "\nNEW:"
self.show(self.user_data)
if self.confirm():
self.user_data.save()
print "\nOLD:"
self.show(self.location_data)
self.update_definition(self.location_data, LOCATION_FIELDS)
print "\nNEW:"
self.show(self.location_data)
if self.confirm():
self.location_data.save()
def update_definition(self, definition, fields_spec):
existing = {field.slug for field in definition.fields}
for field in self.get_fields(fields_spec):
if field.slug not in existing:
definition.fields.append(field)
def get_fields(self, spec):
return [
CustomDataField(
slug=slug,
is_required=False,
label=label,
choices=choices,
)
for slug, label, choices in spec
]
|
Add mgmt command to auto-add new fieldsfrom django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition, CustomDataField
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
# pcp -> MBBS
# pac -> AYUSH/other
# plc -> Private Lab
# pcc -> pharmacy / chemist
LOCATION_FIELDS = [
# (slug, label, choices)
('private_sector_org_id', "Private Sector Org ID", []),
('suborganization', "Suborganization", ["MGK", "Alert"]),
]
USER_FIELDS = [
('tb_corner', "TB Corner", ["Yes", "No"]),
('mbbs_qualification', "MBBS Qualification", ["MBBS", "DTCD", "MD - Chest Physician",
"MD - Medicine", "MS", "DM"]),
('ayush_qualification', "AYUSH Qualification", ["BAMS", "BHMS", "BUMS", "DAMS", "DHMS", "ASHA",
"ANM", "GNM", "LCEH", "NGO", "Others", "None"]),
('professional_org_membership', "Professional Org Membership", ["IMA", "WMA", "AMA", "AAFP",
"Others", "None"]),
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def show(self, definition):
for field in definition.fields:
print " ", field.slug
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.user_data = CustomDataFieldsDefinition.get_or_create(
domain, UserFieldsView.field_type)
self.location_data = CustomDataFieldsDefinition.get_or_create(
domain, LocationFieldsView.field_type)
print "\nOLD:"
self.show(self.user_data)
self.update_definition(self.user_data, USER_FIELDS)
print "\nNEW:"
self.show(self.user_data)
if self.confirm():
self.user_data.save()
print "\nOLD:"
self.show(self.location_data)
self.update_definition(self.location_data, LOCATION_FIELDS)
print "\nNEW:"
self.show(self.location_data)
if self.confirm():
self.location_data.save()
def update_definition(self, definition, fields_spec):
existing = {field.slug for field in definition.fields}
for field in self.get_fields(fields_spec):
if field.slug not in existing:
definition.fields.append(field)
def get_fields(self, spec):
return [
CustomDataField(
slug=slug,
is_required=False,
label=label,
choices=choices,
)
for slug, label, choices in spec
]
|
<commit_before><commit_msg>Add mgmt command to auto-add new fields<commit_after>from django.core.management.base import BaseCommand
from corehq.apps.custom_data_fields.models import CustomDataFieldsDefinition, CustomDataField
from corehq.apps.locations.views import LocationFieldsView
from corehq.apps.users.views.mobile.custom_data_fields import UserFieldsView
# pcp -> MBBS
# pac -> AYUSH/other
# plc -> Private Lab
# pcc -> pharmacy / chemist
LOCATION_FIELDS = [
# (slug, label, choices)
('private_sector_org_id', "Private Sector Org ID", []),
('suborganization', "Suborganization", ["MGK", "Alert"]),
]
USER_FIELDS = [
('tb_corner', "TB Corner", ["Yes", "No"]),
('mbbs_qualification', "MBBS Qualification", ["MBBS", "DTCD", "MD - Chest Physician",
"MD - Medicine", "MS", "DM"]),
('ayush_qualification', "AYUSH Qualification", ["BAMS", "BHMS", "BUMS", "DAMS", "DHMS", "ASHA",
"ANM", "GNM", "LCEH", "NGO", "Others", "None"]),
('professional_org_membership', "Professional Org Membership", ["IMA", "WMA", "AMA", "AAFP",
"Others", "None"]),
]
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('domain')
def show(self, definition):
for field in definition.fields:
print " ", field.slug
def confirm(self):
return raw_input("Continue?\n(y/n)") == 'y'
def handle(self, domain, **options):
self.user_data = CustomDataFieldsDefinition.get_or_create(
domain, UserFieldsView.field_type)
self.location_data = CustomDataFieldsDefinition.get_or_create(
domain, LocationFieldsView.field_type)
print "\nOLD:"
self.show(self.user_data)
self.update_definition(self.user_data, USER_FIELDS)
print "\nNEW:"
self.show(self.user_data)
if self.confirm():
self.user_data.save()
print "\nOLD:"
self.show(self.location_data)
self.update_definition(self.location_data, LOCATION_FIELDS)
print "\nNEW:"
self.show(self.location_data)
if self.confirm():
self.location_data.save()
def update_definition(self, definition, fields_spec):
existing = {field.slug for field in definition.fields}
for field in self.get_fields(fields_spec):
if field.slug not in existing:
definition.fields.append(field)
def get_fields(self, spec):
return [
CustomDataField(
slug=slug,
is_required=False,
label=label,
choices=choices,
)
for slug, label, choices in spec
]
|
|
4db57a5fa786e9e209b428d4215b6033d15f1315
|
functest/tests/unit/features/test_copper.py
|
functest/tests/unit/features/test_copper.py
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import copper
from functest.utils import constants
class CopperTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.copper = copper.Copper()
def test_init(self):
self.assertEqual(self.copper.project_name, "copper")
self.assertEqual(self.copper.case_name, "copper-notification")
self.assertEqual(
self.copper.repo,
constants.CONST.__getattribute__("dir_repo_copper"))
self.assertEqual(
self.copper.cmd,
"cd {}/tests && bash run.sh && cd -".format(self.copper.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for copper
|
Add unit tests for copper
Change-Id: Ia4e53e2aee5b93071b3acd3d75c7e42841321a0a
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
Python
|
apache-2.0
|
opnfv/functest,mywulin/functest,mywulin/functest,opnfv/functest
|
Add unit tests for copper
Change-Id: Ia4e53e2aee5b93071b3acd3d75c7e42841321a0a
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import copper
from functest.utils import constants
class CopperTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.copper = copper.Copper()
def test_init(self):
self.assertEqual(self.copper.project_name, "copper")
self.assertEqual(self.copper.case_name, "copper-notification")
self.assertEqual(
self.copper.repo,
constants.CONST.__getattribute__("dir_repo_copper"))
self.assertEqual(
self.copper.cmd,
"cd {}/tests && bash run.sh && cd -".format(self.copper.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for copper
Change-Id: Ia4e53e2aee5b93071b3acd3d75c7e42841321a0a
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import copper
from functest.utils import constants
class CopperTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.copper = copper.Copper()
def test_init(self):
self.assertEqual(self.copper.project_name, "copper")
self.assertEqual(self.copper.case_name, "copper-notification")
self.assertEqual(
self.copper.repo,
constants.CONST.__getattribute__("dir_repo_copper"))
self.assertEqual(
self.copper.cmd,
"cd {}/tests && bash run.sh && cd -".format(self.copper.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
Add unit tests for copper
Change-Id: Ia4e53e2aee5b93071b3acd3d75c7e42841321a0a
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import copper
from functest.utils import constants
class CopperTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.copper = copper.Copper()
def test_init(self):
self.assertEqual(self.copper.project_name, "copper")
self.assertEqual(self.copper.case_name, "copper-notification")
self.assertEqual(
self.copper.repo,
constants.CONST.__getattribute__("dir_repo_copper"))
self.assertEqual(
self.copper.cmd,
"cd {}/tests && bash run.sh && cd -".format(self.copper.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
<commit_before><commit_msg>Add unit tests for copper
Change-Id: Ia4e53e2aee5b93071b3acd3d75c7e42841321a0a
Signed-off-by: Cédric Ollivier <d48310251a4a484d041bc5d09a9ac4d86d20f793@orange.com><commit_after>#!/usr/bin/env python
# Copyright (c) 2017 Orange and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
# pylint: disable=missing-docstring
import logging
import unittest
from functest.opnfv_tests.features import copper
from functest.utils import constants
class CopperTesting(unittest.TestCase):
logging.disable(logging.CRITICAL)
def setUp(self):
self.copper = copper.Copper()
def test_init(self):
self.assertEqual(self.copper.project_name, "copper")
self.assertEqual(self.copper.case_name, "copper-notification")
self.assertEqual(
self.copper.repo,
constants.CONST.__getattribute__("dir_repo_copper"))
self.assertEqual(
self.copper.cmd,
"cd {}/tests && bash run.sh && cd -".format(self.copper.repo))
if __name__ == "__main__":
unittest.main(verbosity=2)
|
|
04b85be1ddc9bc32aba0129ea89b1779be598489
|
bot/multithreading/worker/pool/workers/limited_lifespan.py
|
bot/multithreading/worker/pool/workers/limited_lifespan.py
|
import queue
from bot.multithreading.worker import QueueWorker
class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before ending
|
Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before ending
|
Python
|
agpl-3.0
|
alvarogzp/telegram-bot,alvarogzp/telegram-bot
|
Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before ending
|
import queue
from bot.multithreading.worker import QueueWorker
class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
<commit_before><commit_msg>Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before ending<commit_after>
|
import queue
from bot.multithreading.worker import QueueWorker
class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before endingimport queue
from bot.multithreading.worker import QueueWorker
class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
<commit_before><commit_msg>Create a temporal worker that is running only when there is work to do, waiting max_seconds_idle before ending<commit_after>import queue
from bot.multithreading.worker import QueueWorker
class LimitedLifespanQueueWorker(QueueWorker):
def __init__(self, name: str, work_queue: queue.Queue, error_handler: callable, max_seconds_idle: int,
end_notify: callable):
"""
:param max_seconds_idle: Max seconds to wait for a new work to appear before ending the execution.
If it is None, it behaves as a QueueWorker, waiting forever.
"""
super().__init__(name, work_queue, error_handler)
self.max_seconds_idle = max_seconds_idle
self.end_notify = end_notify
def run(self):
while self._get_and_execute():
pass
def _get_and_execute(self):
"""
:return: True if it should continue running, False if it should end its execution.
"""
try:
work = self.queue.get(timeout=self.max_seconds_idle)
except queue.Empty:
# max_seconds_idle has been exhausted, exiting
self.end_notify()
return False
else:
self._work(work)
self.queue.task_done()
return True
|
|
9556916a2732da3681c044f5c7f5a78cda6ee25d
|
sigma_core/migrations/0008_auto_20160108_1618.py
|
sigma_core/migrations/0008_auto_20160108_1618.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-08 15:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigma_core', '0007_auto_20160102_1647'),
]
operations = [
migrations.CreateModel(
name='GroupCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('validator_values', models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name='GroupCustomFieldValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=255)),
('field', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='values', to='sigma_core.GroupCustomField')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Validator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('html_name', models.CharField(choices=[('text', 'Text'), ('none', 'None')], default='none', max_length=255)),
('values', models.CharField(max_length=1024)),
],
),
migrations.AddField(
model_name='groupcustomfield',
name='validator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Validator'),
),
migrations.AddField(
model_name='group',
name='custom_fields',
field=models.ManyToManyField(related_name='_group_custom_fields_+', to='sigma_core.GroupCustomField'),
),
]
|
Add migrations for custom fields / groups custom fields
|
Add migrations for custom fields / groups custom fields
|
Python
|
agpl-3.0
|
ProjetSigma/backend,ProjetSigma/backend
|
Add migrations for custom fields / groups custom fields
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-08 15:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigma_core', '0007_auto_20160102_1647'),
]
operations = [
migrations.CreateModel(
name='GroupCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('validator_values', models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name='GroupCustomFieldValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=255)),
('field', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='values', to='sigma_core.GroupCustomField')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Validator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('html_name', models.CharField(choices=[('text', 'Text'), ('none', 'None')], default='none', max_length=255)),
('values', models.CharField(max_length=1024)),
],
),
migrations.AddField(
model_name='groupcustomfield',
name='validator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Validator'),
),
migrations.AddField(
model_name='group',
name='custom_fields',
field=models.ManyToManyField(related_name='_group_custom_fields_+', to='sigma_core.GroupCustomField'),
),
]
|
<commit_before><commit_msg>Add migrations for custom fields / groups custom fields<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-08 15:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigma_core', '0007_auto_20160102_1647'),
]
operations = [
migrations.CreateModel(
name='GroupCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('validator_values', models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name='GroupCustomFieldValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=255)),
('field', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='values', to='sigma_core.GroupCustomField')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Validator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('html_name', models.CharField(choices=[('text', 'Text'), ('none', 'None')], default='none', max_length=255)),
('values', models.CharField(max_length=1024)),
],
),
migrations.AddField(
model_name='groupcustomfield',
name='validator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Validator'),
),
migrations.AddField(
model_name='group',
name='custom_fields',
field=models.ManyToManyField(related_name='_group_custom_fields_+', to='sigma_core.GroupCustomField'),
),
]
|
Add migrations for custom fields / groups custom fields# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-08 15:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigma_core', '0007_auto_20160102_1647'),
]
operations = [
migrations.CreateModel(
name='GroupCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('validator_values', models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name='GroupCustomFieldValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=255)),
('field', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='values', to='sigma_core.GroupCustomField')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Validator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('html_name', models.CharField(choices=[('text', 'Text'), ('none', 'None')], default='none', max_length=255)),
('values', models.CharField(max_length=1024)),
],
),
migrations.AddField(
model_name='groupcustomfield',
name='validator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Validator'),
),
migrations.AddField(
model_name='group',
name='custom_fields',
field=models.ManyToManyField(related_name='_group_custom_fields_+', to='sigma_core.GroupCustomField'),
),
]
|
<commit_before><commit_msg>Add migrations for custom fields / groups custom fields<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-08 15:18
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('sigma_core', '0007_auto_20160102_1647'),
]
operations = [
migrations.CreateModel(
name='GroupCustomField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('validator_values', models.CharField(max_length=1024)),
],
),
migrations.CreateModel(
name='GroupCustomFieldValue',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('value', models.CharField(max_length=255)),
('field', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='values', to='sigma_core.GroupCustomField')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Group')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Validator',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_name', models.CharField(max_length=255)),
('html_name', models.CharField(choices=[('text', 'Text'), ('none', 'None')], default='none', max_length=255)),
('values', models.CharField(max_length=1024)),
],
),
migrations.AddField(
model_name='groupcustomfield',
name='validator',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='sigma_core.Validator'),
),
migrations.AddField(
model_name='group',
name='custom_fields',
field=models.ManyToManyField(related_name='_group_custom_fields_+', to='sigma_core.GroupCustomField'),
),
]
|
|
bd16a5ccb8e0cc9b68ebd9ee2285c466e8fff32e
|
candidates/migrations/0016_migrate_data_to_extra_fields.py
|
candidates/migrations/0016_migrate_data_to_extra_fields.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
def from_person_extra_to_generic_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
PersonExtra = apps.get_model('candidates', 'PersonExtra')
if settings.ELECTION_APP == 'cr':
p_field = ExtraField.objects.create(
key='profession',
type='line',
label=u'Profession',
)
elif settings.ELECTION_APP == 'bf_elections_2015':
c_field = ExtraField.objects.create(
key='cv',
type='longer-text',
label=u'CV or Résumé',
)
p_field = ExtraField.objects.create(
key='program',
type='longer-text',
label=u'Program',
)
for pe in PersonExtra.objects.all():
person = pe.base
PersonExtraFieldValue.objects.create(
person=person,
field=c_field,
value=pe.cv
)
PersonExtraFieldValue.objects.create(
person=person,
field=p_field,
value=pe.program
)
def from_generic_fields_to_person_extra(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
if settings.ELECTION_APP == 'bf_elections_2015':
for pefv in PersonExtraFieldValue.objects.select_related('field'):
pe = pefv.person.extra
if pefv.field.key == 'cv':
pe.cv = pefv.value
pe.save()
elif pefv.field.key == 'program':
pe.program = pefv.value
pe.save()
else:
print "Ignoring field with unknown key:", pefv.field.key
PersonExtraFieldValue.objects.all().delete()
ExtraField.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0015_add_configurable_extra_fields'),
]
operations = [
migrations.RunPython(
from_person_extra_to_generic_fields,
from_generic_fields_to_person_extra
)
]
|
Add a data migration for extra fields for BF and CR
|
Add a data migration for extra fields for BF and CR
|
Python
|
agpl-3.0
|
datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit,datamade/yournextmp-popit
|
Add a data migration for extra fields for BF and CR
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
def from_person_extra_to_generic_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
PersonExtra = apps.get_model('candidates', 'PersonExtra')
if settings.ELECTION_APP == 'cr':
p_field = ExtraField.objects.create(
key='profession',
type='line',
label=u'Profession',
)
elif settings.ELECTION_APP == 'bf_elections_2015':
c_field = ExtraField.objects.create(
key='cv',
type='longer-text',
label=u'CV or Résumé',
)
p_field = ExtraField.objects.create(
key='program',
type='longer-text',
label=u'Program',
)
for pe in PersonExtra.objects.all():
person = pe.base
PersonExtraFieldValue.objects.create(
person=person,
field=c_field,
value=pe.cv
)
PersonExtraFieldValue.objects.create(
person=person,
field=p_field,
value=pe.program
)
def from_generic_fields_to_person_extra(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
if settings.ELECTION_APP == 'bf_elections_2015':
for pefv in PersonExtraFieldValue.objects.select_related('field'):
pe = pefv.person.extra
if pefv.field.key == 'cv':
pe.cv = pefv.value
pe.save()
elif pefv.field.key == 'program':
pe.program = pefv.value
pe.save()
else:
print "Ignoring field with unknown key:", pefv.field.key
PersonExtraFieldValue.objects.all().delete()
ExtraField.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0015_add_configurable_extra_fields'),
]
operations = [
migrations.RunPython(
from_person_extra_to_generic_fields,
from_generic_fields_to_person_extra
)
]
|
<commit_before><commit_msg>Add a data migration for extra fields for BF and CR<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
def from_person_extra_to_generic_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
PersonExtra = apps.get_model('candidates', 'PersonExtra')
if settings.ELECTION_APP == 'cr':
p_field = ExtraField.objects.create(
key='profession',
type='line',
label=u'Profession',
)
elif settings.ELECTION_APP == 'bf_elections_2015':
c_field = ExtraField.objects.create(
key='cv',
type='longer-text',
label=u'CV or Résumé',
)
p_field = ExtraField.objects.create(
key='program',
type='longer-text',
label=u'Program',
)
for pe in PersonExtra.objects.all():
person = pe.base
PersonExtraFieldValue.objects.create(
person=person,
field=c_field,
value=pe.cv
)
PersonExtraFieldValue.objects.create(
person=person,
field=p_field,
value=pe.program
)
def from_generic_fields_to_person_extra(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
if settings.ELECTION_APP == 'bf_elections_2015':
for pefv in PersonExtraFieldValue.objects.select_related('field'):
pe = pefv.person.extra
if pefv.field.key == 'cv':
pe.cv = pefv.value
pe.save()
elif pefv.field.key == 'program':
pe.program = pefv.value
pe.save()
else:
print "Ignoring field with unknown key:", pefv.field.key
PersonExtraFieldValue.objects.all().delete()
ExtraField.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0015_add_configurable_extra_fields'),
]
operations = [
migrations.RunPython(
from_person_extra_to_generic_fields,
from_generic_fields_to_person_extra
)
]
|
Add a data migration for extra fields for BF and CR# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
def from_person_extra_to_generic_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
PersonExtra = apps.get_model('candidates', 'PersonExtra')
if settings.ELECTION_APP == 'cr':
p_field = ExtraField.objects.create(
key='profession',
type='line',
label=u'Profession',
)
elif settings.ELECTION_APP == 'bf_elections_2015':
c_field = ExtraField.objects.create(
key='cv',
type='longer-text',
label=u'CV or Résumé',
)
p_field = ExtraField.objects.create(
key='program',
type='longer-text',
label=u'Program',
)
for pe in PersonExtra.objects.all():
person = pe.base
PersonExtraFieldValue.objects.create(
person=person,
field=c_field,
value=pe.cv
)
PersonExtraFieldValue.objects.create(
person=person,
field=p_field,
value=pe.program
)
def from_generic_fields_to_person_extra(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
if settings.ELECTION_APP == 'bf_elections_2015':
for pefv in PersonExtraFieldValue.objects.select_related('field'):
pe = pefv.person.extra
if pefv.field.key == 'cv':
pe.cv = pefv.value
pe.save()
elif pefv.field.key == 'program':
pe.program = pefv.value
pe.save()
else:
print "Ignoring field with unknown key:", pefv.field.key
PersonExtraFieldValue.objects.all().delete()
ExtraField.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0015_add_configurable_extra_fields'),
]
operations = [
migrations.RunPython(
from_person_extra_to_generic_fields,
from_generic_fields_to_person_extra
)
]
|
<commit_before><commit_msg>Add a data migration for extra fields for BF and CR<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
def from_person_extra_to_generic_fields(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
PersonExtra = apps.get_model('candidates', 'PersonExtra')
if settings.ELECTION_APP == 'cr':
p_field = ExtraField.objects.create(
key='profession',
type='line',
label=u'Profession',
)
elif settings.ELECTION_APP == 'bf_elections_2015':
c_field = ExtraField.objects.create(
key='cv',
type='longer-text',
label=u'CV or Résumé',
)
p_field = ExtraField.objects.create(
key='program',
type='longer-text',
label=u'Program',
)
for pe in PersonExtra.objects.all():
person = pe.base
PersonExtraFieldValue.objects.create(
person=person,
field=c_field,
value=pe.cv
)
PersonExtraFieldValue.objects.create(
person=person,
field=p_field,
value=pe.program
)
def from_generic_fields_to_person_extra(apps, schema_editor):
ExtraField = apps.get_model('candidates', 'ExtraField')
PersonExtraFieldValue = apps.get_model('candidates', 'PersonExtraFieldValue')
if settings.ELECTION_APP == 'bf_elections_2015':
for pefv in PersonExtraFieldValue.objects.select_related('field'):
pe = pefv.person.extra
if pefv.field.key == 'cv':
pe.cv = pefv.value
pe.save()
elif pefv.field.key == 'program':
pe.program = pefv.value
pe.save()
else:
print "Ignoring field with unknown key:", pefv.field.key
PersonExtraFieldValue.objects.all().delete()
ExtraField.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
('candidates', '0015_add_configurable_extra_fields'),
]
operations = [
migrations.RunPython(
from_person_extra_to_generic_fields,
from_generic_fields_to_person_extra
)
]
|
|
b20fbe03717183cb45da81179fd0b6886f2b6b2a
|
alembic/versions/96ca40f7c6c2_add_creator.py
|
alembic/versions/96ca40f7c6c2_add_creator.py
|
"""add creator
Revision ID: 96ca40f7c6c2
Revises: ef6fef5147b2
Create Date: 2016-10-27 15:14:18.031571
"""
# revision identifiers, used by Alembic.
revision = '96ca40f7c6c2'
down_revision = 'ef6fef5147b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_foreign_key(op.f('fk_kingdom_user_creator'), 'kingdom', 'user', ['creator'], ['uuid'])
def downgrade():
op.drop_constraint(op.f('fk_kingdomrating_user_creator'), 'kingdomrating', type_='foreignkey')
|
Add missing kingdom/user foreign key
|
Add missing kingdom/user foreign key
|
Python
|
mit
|
EliRibble/dominus,EliRibble/dominus,EliRibble/dominus,EliRibble/dominus
|
Add missing kingdom/user foreign key
|
"""add creator
Revision ID: 96ca40f7c6c2
Revises: ef6fef5147b2
Create Date: 2016-10-27 15:14:18.031571
"""
# revision identifiers, used by Alembic.
revision = '96ca40f7c6c2'
down_revision = 'ef6fef5147b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_foreign_key(op.f('fk_kingdom_user_creator'), 'kingdom', 'user', ['creator'], ['uuid'])
def downgrade():
op.drop_constraint(op.f('fk_kingdomrating_user_creator'), 'kingdomrating', type_='foreignkey')
|
<commit_before><commit_msg>Add missing kingdom/user foreign key<commit_after>
|
"""add creator
Revision ID: 96ca40f7c6c2
Revises: ef6fef5147b2
Create Date: 2016-10-27 15:14:18.031571
"""
# revision identifiers, used by Alembic.
revision = '96ca40f7c6c2'
down_revision = 'ef6fef5147b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_foreign_key(op.f('fk_kingdom_user_creator'), 'kingdom', 'user', ['creator'], ['uuid'])
def downgrade():
op.drop_constraint(op.f('fk_kingdomrating_user_creator'), 'kingdomrating', type_='foreignkey')
|
Add missing kingdom/user foreign key"""add creator
Revision ID: 96ca40f7c6c2
Revises: ef6fef5147b2
Create Date: 2016-10-27 15:14:18.031571
"""
# revision identifiers, used by Alembic.
revision = '96ca40f7c6c2'
down_revision = 'ef6fef5147b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_foreign_key(op.f('fk_kingdom_user_creator'), 'kingdom', 'user', ['creator'], ['uuid'])
def downgrade():
op.drop_constraint(op.f('fk_kingdomrating_user_creator'), 'kingdomrating', type_='foreignkey')
|
<commit_before><commit_msg>Add missing kingdom/user foreign key<commit_after>"""add creator
Revision ID: 96ca40f7c6c2
Revises: ef6fef5147b2
Create Date: 2016-10-27 15:14:18.031571
"""
# revision identifiers, used by Alembic.
revision = '96ca40f7c6c2'
down_revision = 'ef6fef5147b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
op.create_foreign_key(op.f('fk_kingdom_user_creator'), 'kingdom', 'user', ['creator'], ['uuid'])
def downgrade():
op.drop_constraint(op.f('fk_kingdomrating_user_creator'), 'kingdomrating', type_='foreignkey')
|
|
808c00dd295fce89a5c8bde7b20bd558e7c674a2
|
grammpy-transforms/ChomskyHiearchy/__init__.py
|
grammpy-transforms/ChomskyHiearchy/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add package for transforming context-free grammar into Chomsky hierarchy
|
Add package for transforming context-free grammar into Chomsky hierarchy
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add package for transforming context-free grammar into Chomsky hierarchy
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add package for transforming context-free grammar into Chomsky hierarchy<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add package for transforming context-free grammar into Chomsky hierarchy#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add package for transforming context-free grammar into Chomsky hierarchy<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy-transforms
"""
|
|
77eb463bde029956557a1e9abedbef22ec21f647
|
examples/list_windows_updates.py
|
examples/list_windows_updates.py
|
"""
Example script for listing installed updates on Windows 10
Requirements:
- Windows 10 (may work on Win7+)
- pywinauto 0.6.1+
This example opens "Control Panel", navigates to "Installed Updates" page
and lists all updates (for all apps) as well as OS Windows updates only.
"""
from __future__ import print_function
from pywinauto import Application
# Open "Control Panel"
Application().start('control.exe')
app = Application(backend='uia').connect(path='explorer.exe', title='Control Panel')
# Go to "Programs"
app.window(title='Control Panel').ProgramsHyperlink.invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
# Go to "Installed Updates"
app.window(title='Programs').child_window(title='View installed updates', control_type='Hyperlink').invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
list_box = app.InstalledUpdates.FolderViewListBox
# list all updates
items = list_box.descendants(control_type='ListItem')
all_updates = [item.window_text() for item in items]
print('\nAll updates ({}):\n'.format(len(all_updates)))
print(all_updates)
# list updates from "Microsoft Windows" group only
windows_group_box = list_box.child_window(title_re='^Microsoft Windows.*', control_type='Group')
windows_items = windows_group_box.descendants(control_type='ListItem')
windows_updates = [item.window_text() for item in windows_items]
print('\nWindows updates only ({}):\n'.format(len(windows_updates)))
print(windows_updates)
|
Add an example listing installed Windows updates.
|
Add an example listing installed Windows updates.
|
Python
|
bsd-3-clause
|
airelil/pywinauto,vasily-v-ryabov/pywinauto,cetygamer/pywinauto,pywinauto/pywinauto,drinkertea/pywinauto
|
Add an example listing installed Windows updates.
|
"""
Example script for listing installed updates on Windows 10
Requirements:
- Windows 10 (may work on Win7+)
- pywinauto 0.6.1+
This example opens "Control Panel", navigates to "Installed Updates" page
and lists all updates (for all apps) as well as OS Windows updates only.
"""
from __future__ import print_function
from pywinauto import Application
# Open "Control Panel"
Application().start('control.exe')
app = Application(backend='uia').connect(path='explorer.exe', title='Control Panel')
# Go to "Programs"
app.window(title='Control Panel').ProgramsHyperlink.invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
# Go to "Installed Updates"
app.window(title='Programs').child_window(title='View installed updates', control_type='Hyperlink').invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
list_box = app.InstalledUpdates.FolderViewListBox
# list all updates
items = list_box.descendants(control_type='ListItem')
all_updates = [item.window_text() for item in items]
print('\nAll updates ({}):\n'.format(len(all_updates)))
print(all_updates)
# list updates from "Microsoft Windows" group only
windows_group_box = list_box.child_window(title_re='^Microsoft Windows.*', control_type='Group')
windows_items = windows_group_box.descendants(control_type='ListItem')
windows_updates = [item.window_text() for item in windows_items]
print('\nWindows updates only ({}):\n'.format(len(windows_updates)))
print(windows_updates)
|
<commit_before><commit_msg>Add an example listing installed Windows updates.<commit_after>
|
"""
Example script for listing installed updates on Windows 10
Requirements:
- Windows 10 (may work on Win7+)
- pywinauto 0.6.1+
This example opens "Control Panel", navigates to "Installed Updates" page
and lists all updates (for all apps) as well as OS Windows updates only.
"""
from __future__ import print_function
from pywinauto import Application
# Open "Control Panel"
Application().start('control.exe')
app = Application(backend='uia').connect(path='explorer.exe', title='Control Panel')
# Go to "Programs"
app.window(title='Control Panel').ProgramsHyperlink.invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
# Go to "Installed Updates"
app.window(title='Programs').child_window(title='View installed updates', control_type='Hyperlink').invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
list_box = app.InstalledUpdates.FolderViewListBox
# list all updates
items = list_box.descendants(control_type='ListItem')
all_updates = [item.window_text() for item in items]
print('\nAll updates ({}):\n'.format(len(all_updates)))
print(all_updates)
# list updates from "Microsoft Windows" group only
windows_group_box = list_box.child_window(title_re='^Microsoft Windows.*', control_type='Group')
windows_items = windows_group_box.descendants(control_type='ListItem')
windows_updates = [item.window_text() for item in windows_items]
print('\nWindows updates only ({}):\n'.format(len(windows_updates)))
print(windows_updates)
|
Add an example listing installed Windows updates."""
Example script for listing installed updates on Windows 10
Requirements:
- Windows 10 (may work on Win7+)
- pywinauto 0.6.1+
This example opens "Control Panel", navigates to "Installed Updates" page
and lists all updates (for all apps) as well as OS Windows updates only.
"""
from __future__ import print_function
from pywinauto import Application
# Open "Control Panel"
Application().start('control.exe')
app = Application(backend='uia').connect(path='explorer.exe', title='Control Panel')
# Go to "Programs"
app.window(title='Control Panel').ProgramsHyperlink.invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
# Go to "Installed Updates"
app.window(title='Programs').child_window(title='View installed updates', control_type='Hyperlink').invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
list_box = app.InstalledUpdates.FolderViewListBox
# list all updates
items = list_box.descendants(control_type='ListItem')
all_updates = [item.window_text() for item in items]
print('\nAll updates ({}):\n'.format(len(all_updates)))
print(all_updates)
# list updates from "Microsoft Windows" group only
windows_group_box = list_box.child_window(title_re='^Microsoft Windows.*', control_type='Group')
windows_items = windows_group_box.descendants(control_type='ListItem')
windows_updates = [item.window_text() for item in windows_items]
print('\nWindows updates only ({}):\n'.format(len(windows_updates)))
print(windows_updates)
|
<commit_before><commit_msg>Add an example listing installed Windows updates.<commit_after>"""
Example script for listing installed updates on Windows 10
Requirements:
- Windows 10 (may work on Win7+)
- pywinauto 0.6.1+
This example opens "Control Panel", navigates to "Installed Updates" page
and lists all updates (for all apps) as well as OS Windows updates only.
"""
from __future__ import print_function
from pywinauto import Application
# Open "Control Panel"
Application().start('control.exe')
app = Application(backend='uia').connect(path='explorer.exe', title='Control Panel')
# Go to "Programs"
app.window(title='Control Panel').ProgramsHyperlink.invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
# Go to "Installed Updates"
app.window(title='Programs').child_window(title='View installed updates', control_type='Hyperlink').invoke()
app.wait_cpu_usage_lower(threshold=0.5, timeout=30, usage_interval=1.0)
list_box = app.InstalledUpdates.FolderViewListBox
# list all updates
items = list_box.descendants(control_type='ListItem')
all_updates = [item.window_text() for item in items]
print('\nAll updates ({}):\n'.format(len(all_updates)))
print(all_updates)
# list updates from "Microsoft Windows" group only
windows_group_box = list_box.child_window(title_re='^Microsoft Windows.*', control_type='Group')
windows_items = windows_group_box.descendants(control_type='ListItem')
windows_updates = [item.window_text() for item in windows_items]
print('\nWindows updates only ({}):\n'.format(len(windows_updates)))
print(windows_updates)
|
|
f499f58c765cbd83e77e44be1dfbccc3aed772c6
|
mozillians/users/management/commands/reindex_mozillians.py
|
mozillians/users/management/commands/reindex_mozillians.py
|
from django.core.management.base import BaseCommand
from mozillians.users.tasks import index_all_profiles
class Command(BaseCommand):
def handle(self, *args, **options):
index_all_profiles()
|
Add management command to reindex mozillians ES.
|
Add management command to reindex mozillians ES.
|
Python
|
bsd-3-clause
|
akatsoulas/mozillians,mozilla/mozillians,johngian/mozillians,mozilla/mozillians,mozilla/mozillians,johngian/mozillians,akatsoulas/mozillians,johngian/mozillians,akatsoulas/mozillians,mozilla/mozillians,akatsoulas/mozillians,johngian/mozillians
|
Add management command to reindex mozillians ES.
|
from django.core.management.base import BaseCommand
from mozillians.users.tasks import index_all_profiles
class Command(BaseCommand):
def handle(self, *args, **options):
index_all_profiles()
|
<commit_before><commit_msg>Add management command to reindex mozillians ES.<commit_after>
|
from django.core.management.base import BaseCommand
from mozillians.users.tasks import index_all_profiles
class Command(BaseCommand):
def handle(self, *args, **options):
index_all_profiles()
|
Add management command to reindex mozillians ES.from django.core.management.base import BaseCommand
from mozillians.users.tasks import index_all_profiles
class Command(BaseCommand):
def handle(self, *args, **options):
index_all_profiles()
|
<commit_before><commit_msg>Add management command to reindex mozillians ES.<commit_after>from django.core.management.base import BaseCommand
from mozillians.users.tasks import index_all_profiles
class Command(BaseCommand):
def handle(self, *args, **options):
index_all_profiles()
|
|
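A note on invoking a management command like the one in the record above: Django derives the command name from its file name, so this one runs as reindex_mozillians. A minimal sketch, assuming a configured Django project for the mozillians codebase:

from django.core.management import call_command

# Equivalent to running `python manage.py reindex_mozillians` from a shell;
# this triggers handle(), which in turn calls index_all_profiles().
call_command('reindex_mozillians')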
e727f062fff4f8b522a5637dc617ac57b1850021
|
ghidra_plugin_ioncube_decrypt.py
|
ghidra_plugin_ioncube_decrypt.py
|
# Decrypts "encrypted" strings from ioncube's loaders
#@author ss23
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
encryption_key = [0x25,0x68,0xd3,0xc2,0x28,0xf2,0x59,0x2e,0x94,0xee,0xf2,0x91,0xac,0x13,0x96,0x95]
def attemptDecrypt(addr):
tmplength = getByte(addr)
if tmplength < 0:
length = tmplength + 256
else:
length = tmplength
#print length
content = getBytes(addr.next(), length)
# Convert negatives into positives
# TODO: Surely there's an API call for this
new_content = []
for i in range(0, length):
# jython why
if content[i] < 0:
new_content.append(content[i] + 256)
else:
new_content.append(content[i])
decrypted_string = ""
# Decrypt the content
for i in range(0, length):
decrypted_string += chr(new_content[i] ^ encryption_key[(length + i) % len(encryption_key)])
return decrypted_string
funcs = getGlobalFunctions("ioncube_decrypt")
if len(funcs) < 1:
print "Could not identify ioncube_decrypt function"
exit()
elif len(funcs) > 1:
print "Too many ioncube_decrypt functions identified"
exit()
refs = getReferencesTo(funcs[0].getEntryPoint())
for ref in refs:
addr = ref.getFromAddress()
# instruction before should be the "push encrypted_string" we want
instr = getInstructionBefore(addr)
if (type(instr) == type(None)):
continue
possible_data_addr = instr.getOpObjects(0)[0]
# Java!
addr_factory = getAddressFactory()
# Get the assumed-length
possible_data_addr_str = possible_data_addr.toString()
possible_data_addr = addr_factory.getAddress(possible_data_addr_str)
decrypted_string = attemptDecrypt(possible_data_addr)
# TODO: Figure out how to set repeatable comments on a symbol / address
# TODO: Do not duplicate comments
setPreComment(possible_data_addr, "decrypted: " + decrypted_string)
#print possible_data_addr
print "Completed"
|
Add a Ghidra plugin for decrypting strings
|
Add a Ghidra plugin for decrypting strings
|
Python
|
bsd-2-clause
|
ss23/ioncube-string-decoder,ss23/ioncube-string-decoder
|
Add a Ghidra plugin for decrypting strings
|
# Decrypts "encrypted" strings from ioncube's loaders
#@author ss23
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
encryption_key = [0x25,0x68,0xd3,0xc2,0x28,0xf2,0x59,0x2e,0x94,0xee,0xf2,0x91,0xac,0x13,0x96,0x95]
def attemptDecrypt(addr):
tmplength = getByte(addr)
if tmplength < 0:
length = tmplength + 256
else:
length = tmplength
#print length
content = getBytes(addr.next(), length)
# Convert negatives into positives
# TODO: Surely there's an API call for this
new_content = []
for i in range(0, length):
# jython why
if content[i] < 0:
new_content.append(content[i] + 256)
else:
new_content.append(content[i])
decrypted_string = ""
# Decrypt the content
for i in range(0, length):
decrypted_string += chr(new_content[i] ^ encryption_key[(length + i) % len(encryption_key)])
return decrypted_string
funcs = getGlobalFunctions("ioncube_decrypt")
if len(funcs) < 1:
print "Could not identify ioncube_decrypt function"
exit()
elif len(funcs) > 1:
print "Too many ioncube_decrypt functions identified"
exit()
refs = getReferencesTo(funcs[0].getEntryPoint())
for ref in refs:
addr = ref.getFromAddress()
# instruction before should be the "push encrypted_string" we want
instr = getInstructionBefore(addr)
if (type(instr) == type(None)):
continue
possible_data_addr = instr.getOpObjects(0)[0]
# Java!
addr_factory = getAddressFactory()
# Get the assumed-length
possible_data_addr_str = possible_data_addr.toString()
possible_data_addr = addr_factory.getAddress(possible_data_addr_str)
decrypted_string = attemptDecrypt(possible_data_addr)
# TODO: Figure out how to set repeatable comments on a symbol / address
# TODO: Do not duplicate comments
setPreComment(possible_data_addr, "decrypted: " + decrypted_string)
#print possible_data_addr
print "Completed"
|
<commit_before><commit_msg>Add a Ghidra plugin for decrypting strings<commit_after>
|
# Decrypts "encrypted" strings from ioncube's loaders
#@author ss23
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
encryption_key = [0x25,0x68,0xd3,0xc2,0x28,0xf2,0x59,0x2e,0x94,0xee,0xf2,0x91,0xac,0x13,0x96,0x95]
def attemptDecrypt(addr):
tmplength = getByte(addr)
if tmplength < 0:
length = tmplength + 256
else:
length = tmplength
#print length
content = getBytes(addr.next(), length)
# Convert negatives into positives
# TODO: Surely there's an API call for this
new_content = []
for i in range(0, length):
# jython why
if content[i] < 0:
new_content.append(content[i] + 256)
else:
new_content.append(content[i])
decrypted_string = ""
# Decrypt the content
for i in range(0, length):
decrypted_string += chr(new_content[i] ^ encryption_key[(length + i) % len(encryption_key)])
return decrypted_string
funcs = getGlobalFunctions("ioncube_decrypt")
if len(funcs) < 1:
print "Could not identify ioncube_decrypt function"
exit()
elif len(funcs) > 1:
print "Too many ioncube_decrypt functions identified"
exit()
refs = getReferencesTo(funcs[0].getEntryPoint())
for ref in refs:
addr = ref.getFromAddress()
# instruction before should be the "push encrypted_string" we want
instr = getInstructionBefore(addr)
if (type(instr) == type(None)):
continue
possible_data_addr = instr.getOpObjects(0)[0]
# Java!
addr_factory = getAddressFactory()
# Get the assumed-length
possible_data_addr_str = possible_data_addr.toString()
possible_data_addr = addr_factory.getAddress(possible_data_addr_str)
decrypted_string = attemptDecrypt(possible_data_addr)
# TODO: Figure out how to set repeatable comments on a symbol / address
# TODO: Do not duplicate comments
setPreComment(possible_data_addr, "decrypted: " + decrypted_string)
#print possible_data_addr
print "Completed"
|
Add a Ghidra plugin for decrypting strings# Decrypts "encrypted" strings from ioncube's loaders
#@author ss23
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
encryption_key = [0x25,0x68,0xd3,0xc2,0x28,0xf2,0x59,0x2e,0x94,0xee,0xf2,0x91,0xac,0x13,0x96,0x95]
def attemptDecrypt(addr):
tmplength = getByte(addr)
if tmplength < 0:
length = tmplength + 256
else:
length = tmplength
#print length
content = getBytes(addr.next(), length)
# Convert negatives into positives
# TODO: Surely there's an API call for this
new_content = []
for i in range(0, length):
# jython why
if content[i] < 0:
new_content.append(content[i] + 256)
else:
new_content.append(content[i])
decrypted_string = ""
# Decrypt the content
for i in range(0, length):
decrypted_string += chr(new_content[i] ^ encryption_key[(length + i) % len(encryption_key)])
return decrypted_string
funcs = getGlobalFunctions("ioncube_decrypt")
if len(funcs) < 1:
print "Could not identify ioncube_decrypt function"
exit()
elif len(funcs) > 1:
print "Too many ioncube_decrypt functions identified"
exit()
refs = getReferencesTo(funcs[0].getEntryPoint())
for ref in refs:
addr = ref.getFromAddress()
# instruction before should be the "push encrypted_string" we want
instr = getInstructionBefore(addr)
if (type(instr) == type(None)):
continue
possible_data_addr = instr.getOpObjects(0)[0]
# Java!
addr_factory = getAddressFactory()
# Get the assumed-length
possible_data_addr_str = possible_data_addr.toString()
possible_data_addr = addr_factory.getAddress(possible_data_addr_str)
decrypted_string = attemptDecrypt(possible_data_addr)
# TODO: Figure out how to set repeatable comments on a symbol / address
# TODO: Do not duplicate comments
setPreComment(possible_data_addr, "decrypted: " + decrypted_string)
#print possible_data_addr
print "Completed"
|
<commit_before><commit_msg>Add a Ghidra plugin for decrypting strings<commit_after># Decrypts "encrypted" strings from ioncube's loaders
#@author ss23
#@category _NEW_
#@keybinding
#@menupath
#@toolbar
encryption_key = [0x25,0x68,0xd3,0xc2,0x28,0xf2,0x59,0x2e,0x94,0xee,0xf2,0x91,0xac,0x13,0x96,0x95]
def attemptDecrypt(addr):
tmplength = getByte(addr)
if tmplength < 0:
length = tmplength + 256
else:
length = tmplength
#print length
content = getBytes(addr.next(), length)
# Convert negatives into positives
# TODO: Surely there's an API call for this
new_content = []
for i in range(0, length):
# jython why
if content[i] < 0:
new_content.append(content[i] + 256)
else:
new_content.append(content[i])
decrypted_string = ""
# Decrypt the content
for i in range(0, length):
decrypted_string += chr(new_content[i] ^ encryption_key[(length + i) % len(encryption_key)])
return decrypted_string
funcs = getGlobalFunctions("ioncube_decrypt")
if len(funcs) < 1:
print "Could not identify ioncube_decrypt function"
exit()
elif len(funcs) > 1:
print "Too many ioncube_decrypt functions identified"
exit()
refs = getReferencesTo(funcs[0].getEntryPoint())
for ref in refs:
addr = ref.getFromAddress()
# instruction before should be the "push encrypted_string" we want
instr = getInstructionBefore(addr)
if (type(instr) == type(None)):
continue
possible_data_addr = instr.getOpObjects(0)[0]
# Java!
addr_factory = getAddressFactory()
# Get the assumed-length
possible_data_addr_str = possible_data_addr.toString()
possible_data_addr = addr_factory.getAddress(possible_data_addr_str)
decrypted_string = attemptDecrypt(possible_data_addr)
# TODO: Figure out how to set repeatable comments on a symbol / address
# TODO: Do not duplicate comments
setPreComment(possible_data_addr, "decrypted: " + decrypted_string)
#print possible_data_addr
print "Completed"
|
|
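The XOR scheme used by attemptDecrypt in the record above can be exercised outside Ghidra. Below is a minimal plain-Python 3 sketch of the same routine; the layout (a length byte followed by the encrypted bytes) mirrors the script, and the sample plaintext is invented for the round-trip check:

ENCRYPTION_KEY = [0x25, 0x68, 0xd3, 0xc2, 0x28, 0xf2, 0x59, 0x2e,
                  0x94, 0xee, 0xf2, 0x91, 0xac, 0x13, 0x96, 0x95]

def decrypt(blob):
    # First byte holds the length, the rest is the XOR-encrypted string.
    length = blob[0]
    body = blob[1:1 + length]
    # Key index starts at length % 16 and advances one position per byte.
    return ''.join(chr(b ^ ENCRYPTION_KEY[(length + i) % len(ENCRYPTION_KEY)])
                   for i, b in enumerate(body))

# Round-trip check with an invented plaintext.
plaintext = 'licence check'
encrypted = bytes([len(plaintext)] + [
    ord(c) ^ ENCRYPTION_KEY[(len(plaintext) + i) % len(ENCRYPTION_KEY)]
    for i, c in enumerate(plaintext)])
assert decrypt(encrypted) == plaintext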
7408c08e8550dddbdf02681fcf5c376a24f8f1f8
|
zinnia_twitter/__init__.py
|
zinnia_twitter/__init__.py
|
"""Twitter plugin for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/zinnia-twitter'
|
Create zinnia_twitter module and add metadata
|
Create zinnia_twitter module and add metadata
|
Python
|
bsd-3-clause
|
django-blog-zinnia/zinnia-twitter
|
Create zinnia_twitter module and add metadata
|
"""Twitter plugin for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/zinnia-twitter'
|
<commit_before><commit_msg>Create zinnia_twitter module and add metadata<commit_after>
|
"""Twitter plugin for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/zinnia-twitter'
|
Create zinnia_twitter module and add metadata"""Twitter plugin for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/zinnia-twitter'
|
<commit_before><commit_msg>Create zinnia_twitter module and add metadata<commit_after>"""Twitter plugin for Django-blog-zinnia"""
__version__ = '1.0'
__license__ = 'BSD License'
__author__ = 'Fantomas42'
__email__ = 'fantomas42@gmail.com'
__url__ = 'https://github.com/Fantomas42/zinnia-twitter'
|
|
e215dc670cc258d3ec0d559f06e6fdfb7f37f845
|
Underline.py
|
Underline.py
|
# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class UnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
line = self.view.substr(self.view.line(sel))
underline = "\n" + ("-" * len(line))
insertPos = sel
while(self.view.substr(sublime.Region(insertPos.a, insertPos.a+1)) != '\n' and insertPos.a < self.view.size()):
insertPos = sublime.Region(insertPos.a+1, insertPos.a+1)
if (insertPos.a == self.view.size()):
underline += "\n"
self.view.insert(edit, insertPos.begin(), underline)
self.view.sel().clear()
self.view.sel().add(sublime.Region(insertPos.a+len(underline)+1, insertPos.a+len(underline)+1))
|
Create an underline for the selected text
|
Create an underline for the selected text
|
Python
|
mit
|
RichardHyde/SublimeText.Packages
|
Create an underline for the selected text
|
# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class UnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
line = self.view.substr(self.view.line(sel))
underline = "\n" + ("-" * len(line))
insertPos = sel
while(self.view.substr(sublime.Region(insertPos.a, insertPos.a+1)) != '\n' and insertPos.a < self.view.size()):
insertPos = sublime.Region(insertPos.a+1, insertPos.a+1)
if (insertPos.a == self.view.size()):
underline += "\n"
self.view.insert(edit, insertPos.begin(), underline)
self.view.sel().clear()
self.view.sel().add(sublime.Region(insertPos.a+len(underline)+1, insertPos.a+len(underline)+1))
|
<commit_before><commit_msg>Create an underline for the selected text<commit_after>
|
# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class UnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
line = self.view.substr(self.view.line(sel))
underline = "\n" + ("-" * len(line))
insertPos = sel
while(self.view.substr(sublime.Region(insertPos.a, insertPos.a+1)) != '\n' and insertPos.a < self.view.size()):
insertPos = sublime.Region(insertPos.a+1, insertPos.a+1)
if (insertPos.a == self.view.size()):
underline += "\n"
self.view.insert(edit, insertPos.begin(), underline)
self.view.sel().clear()
self.view.sel().add(sublime.Region(insertPos.a+len(underline)+1, insertPos.a+len(underline)+1))
|
Create an underline for the selected text# -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class UnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
line = self.view.substr(self.view.line(sel))
underline = "\n" + ("-" * len(line))
insertPos = sel
while(self.view.substr(sublime.Region(insertPos.a, insertPos.a+1)) != '\n' and insertPos.a < self.view.size()):
insertPos = sublime.Region(insertPos.a+1, insertPos.a+1)
if (insertPos.a == self.view.size()):
underline += "\n"
self.view.insert(edit, insertPos.begin(), underline)
self.view.sel().clear()
self.view.sel().add(sublime.Region(insertPos.a+len(underline)+1, insertPos.a+len(underline)+1))
|
<commit_before><commit_msg>Create an underline for the selected text<commit_after># -*- coding: utf-8 -*-
import re
import sublime, sublime_plugin
class UnderlineCommand(sublime_plugin.TextCommand):
def run(self, edit):
sel = self.view.sel()[0]
line = self.view.substr(self.view.line(sel))
underline = "\n" + ("-" * len(line))
insertPos = sel
while(self.view.substr(sublime.Region(insertPos.a, insertPos.a+1)) != '\n' and insertPos.a < self.view.size()):
insertPos = sublime.Region(insertPos.a+1, insertPos.a+1)
if (insertPos.a == self.view.size()):
underline += "\n"
self.view.insert(edit, insertPos.begin(), underline)
self.view.sel().clear()
self.view.sel().add(sublime.Region(insertPos.a+len(underline)+1, insertPos.a+len(underline)+1))
|
|
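A usage note on the plugin in the record above, based on Sublime Text's convention of deriving command names from class names (an assumption, not something stated in the record): UnderlineCommand is exposed as the command string 'underline'.

# Run from the Sublime Text console (a Python prompt with `view` in scope):
view.run_command("underline")

# A user key binding would reference the same command name, e.g.:
# { "keys": ["ctrl+alt+u"], "command": "underline" }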
433b2284ab8150a5c8c27b295b34324c7a87e905
|
tests/test_email_client.py
|
tests/test_email_client.py
|
from mock import call, patch
from unittest import TestCase
from keteparaha import GmailImapClient
@patch('keteparaha.email_client.IMAPClient')
class GmailClientTest(TestCase):
def test_init_setups_and_logs_in(self, mock_imap_client):
client = GmailImapClient('email', 'password')
self.assertEqual(client.email_address, 'email')
self.assertEqual(client.password, 'password')
self.assertEqual(
mock_imap_client.call_args,
call(GmailImapClient.IMAP_SERVER, use_uid=True, ssl=True)
)
self.assertEqual(
mock_imap_client().login.call_args,
call('email', 'password')
)
self.assertEqual(
mock_imap_client().select_folder.call_args,
call('INBOX')
)
@patch('keteparaha.email_client.email.message_from_string')
def test_gmail_search_performs_login_logout_dance(
self, mock_message_from_string, mock_imap_client
):
client = GmailImapClient('email', 'password')
mock_imap_client.return_value.fetch.return_value = {
1: {'RFC822': 'msg 1'}
}
result = client.gmail_search('query')
self.assertEqual(
mock_imap_client().logout.call_args_list, [call(), call()])
self.assertEqual(
mock_imap_client().login.call_args_list,
[
call(client.email_address, client.password),
call(client.email_address, client.password),
call(client.email_address, client.password)
]
)
self.assertEqual(
mock_imap_client().fetch.call_args_list,
[
call(mock_imap_client().gmail_search(), ['RFC822']),
]
)
self.assertEqual(result, [mock_message_from_string.return_value])
|
Test for the gmail client class
|
Test for the gmail client class
|
Python
|
mit
|
aychedee/keteparaha,tomdottom/keteparaha
|
Test for the gmail client class
|
from mock import call, patch
from unittest import TestCase
from keteparaha import GmailImapClient
@patch('keteparaha.email_client.IMAPClient')
class GmailClientTest(TestCase):
def test_init_setups_and_logs_in(self, mock_imap_client):
client = GmailImapClient('email', 'password')
self.assertEqual(client.email_address, 'email')
self.assertEqual(client.password, 'password')
self.assertEqual(
mock_imap_client.call_args,
call(GmailImapClient.IMAP_SERVER, use_uid=True, ssl=True)
)
self.assertEqual(
mock_imap_client().login.call_args,
call('email', 'password')
)
self.assertEqual(
mock_imap_client().select_folder.call_args,
call('INBOX')
)
@patch('keteparaha.email_client.email.message_from_string')
def test_gmail_search_performs_login_logout_dance(
self, mock_message_from_string, mock_imap_client
):
client = GmailImapClient('email', 'password')
mock_imap_client.return_value.fetch.return_value = {
1: {'RFC822': 'msg 1'}
}
result = client.gmail_search('query')
self.assertEqual(
mock_imap_client().logout.call_args_list, [call(), call()])
self.assertEqual(
mock_imap_client().login.call_args_list,
[
call(client.email_address, client.password),
call(client.email_address, client.password),
call(client.email_address, client.password)
]
)
self.assertEqual(
mock_imap_client().fetch.call_args_list,
[
call(mock_imap_client().gmail_search(), ['RFC822']),
]
)
self.assertEqual(result, [mock_message_from_string.return_value])
|
<commit_before><commit_msg>Test for the gmail client class<commit_after>
|
from mock import call, patch
from unittest import TestCase
from keteparaha import GmailImapClient
@patch('keteparaha.email_client.IMAPClient')
class GmailClientTest(TestCase):
def test_init_setups_and_logs_in(self, mock_imap_client):
client = GmailImapClient('email', 'password')
self.assertEqual(client.email_address, 'email')
self.assertEqual(client.password, 'password')
self.assertEqual(
mock_imap_client.call_args,
call(GmailImapClient.IMAP_SERVER, use_uid=True, ssl=True)
)
self.assertEqual(
mock_imap_client().login.call_args,
call('email', 'password')
)
self.assertEqual(
mock_imap_client().select_folder.call_args,
call('INBOX')
)
@patch('keteparaha.email_client.email.message_from_string')
def test_gmail_search_performs_login_logout_dance(
self, mock_message_from_string, mock_imap_client
):
client = GmailImapClient('email', 'password')
mock_imap_client.return_value.fetch.return_value = {
1: {'RFC822': 'msg 1'}
}
result = client.gmail_search('query')
self.assertEqual(
mock_imap_client().logout.call_args_list, [call(), call()])
self.assertEqual(
mock_imap_client().login.call_args_list,
[
call(client.email_address, client.password),
call(client.email_address, client.password),
call(client.email_address, client.password)
]
)
self.assertEqual(
mock_imap_client().fetch.call_args_list,
[
call(mock_imap_client().gmail_search(), ['RFC822']),
]
)
self.assertEqual(result, [mock_message_from_string.return_value])
|
Test for the gmail client classfrom mock import call, patch
from unittest import TestCase
from keteparaha import GmailImapClient
@patch('keteparaha.email_client.IMAPClient')
class GmailClientTest(TestCase):
def test_init_setups_and_logs_in(self, mock_imap_client):
client = GmailImapClient('email', 'password')
self.assertEqual(client.email_address, 'email')
self.assertEqual(client.password, 'password')
self.assertEqual(
mock_imap_client.call_args,
call(GmailImapClient.IMAP_SERVER, use_uid=True, ssl=True)
)
self.assertEqual(
mock_imap_client().login.call_args,
call('email', 'password')
)
self.assertEqual(
mock_imap_client().select_folder.call_args,
call('INBOX')
)
@patch('keteparaha.email_client.email.message_from_string')
def test_gmail_search_performs_login_logout_dance(
self, mock_message_from_string, mock_imap_client
):
client = GmailImapClient('email', 'password')
mock_imap_client.return_value.fetch.return_value = {
1: {'RFC822': 'msg 1'}
}
result = client.gmail_search('query')
self.assertEqual(
mock_imap_client().logout.call_args_list, [call(), call()])
self.assertEqual(
mock_imap_client().login.call_args_list,
[
call(client.email_address, client.password),
call(client.email_address, client.password),
call(client.email_address, client.password)
]
)
self.assertEqual(
mock_imap_client().fetch.call_args_list,
[
call(mock_imap_client().gmail_search(), ['RFC822']),
]
)
self.assertEqual(result, [mock_message_from_string.return_value])
|
<commit_before><commit_msg>Test for the gmail client class<commit_after>from mock import call, patch
from unittest import TestCase
from keteparaha import GmailImapClient
@patch('keteparaha.email_client.IMAPClient')
class GmailClientTest(TestCase):
def test_init_setups_and_logs_in(self, mock_imap_client):
client = GmailImapClient('email', 'password')
self.assertEqual(client.email_address, 'email')
self.assertEqual(client.password, 'password')
self.assertEqual(
mock_imap_client.call_args,
call(GmailImapClient.IMAP_SERVER, use_uid=True, ssl=True)
)
self.assertEqual(
mock_imap_client().login.call_args,
call('email', 'password')
)
self.assertEqual(
mock_imap_client().select_folder.call_args,
call('INBOX')
)
@patch('keteparaha.email_client.email.message_from_string')
def test_gmail_search_performs_login_logout_dance(
self, mock_message_from_string, mock_imap_client
):
client = GmailImapClient('email', 'password')
mock_imap_client.return_value.fetch.return_value = {
1: {'RFC822': 'msg 1'}
}
result = client.gmail_search('query')
self.assertEqual(
mock_imap_client().logout.call_args_list, [call(), call()])
self.assertEqual(
mock_imap_client().login.call_args_list,
[
call(client.email_address, client.password),
call(client.email_address, client.password),
call(client.email_address, client.password)
]
)
self.assertEqual(
mock_imap_client().fetch.call_args_list,
[
call(mock_imap_client().gmail_search(), ['RFC822']),
]
)
self.assertEqual(result, [mock_message_from_string.return_value])
|
|
b307df3b2b45e5ab003903b8ed5cf341506965fd
|
tests/test_model_object.py
|
tests/test_model_object.py
|
# encoding: utf-8
from marathon.models.base import MarathonObject
import unittest
class MarathonObjectTest(unittest.TestCase):
def test_hashable(self):
"""
Regression test for issue #203
MarathonObject defined __eq__ but not __hash__, meaning that in
Python 2.7 MarathonObjects are hashable, but in Python 3 they're not.
This test ensures that we are hashable in all versions of python
"""
obj = MarathonObject()
collection = {}
collection[obj] = True
assert collection[obj]
|
Add a regression test showing hashing error
|
Add a regression test showing hashing error
|
Python
|
mit
|
thefactory/marathon-python,thefactory/marathon-python
|
Add a regression test showing hashing error
|
# encoding: utf-8
from marathon.models.base import MarathonObject
import unittest
class MarathonObjectTest(unittest.TestCase):
def test_hashable(self):
"""
Regression test for issue #203
MarathonObject defined __eq__ but not __hash__, meaning that in
Python 2.7 MarathonObjects are hashable, but in Python 3 they're not.
This test ensures that we are hashable in all versions of python
"""
obj = MarathonObject()
collection = {}
collection[obj] = True
assert collection[obj]
|
<commit_before><commit_msg>Add a regression test showing hashing error<commit_after>
|
# encoding: utf-8
from marathon.models.base import MarathonObject
import unittest
class MarathonObjectTest(unittest.TestCase):
def test_hashable(self):
"""
Regression test for issue #203
MarathonObject defined __eq__ but not __hash__, meaning that in
Python 2.7 MarathonObjects are hashable, but in Python 3 they're not.
This test ensures that we are hashable in all versions of python
"""
obj = MarathonObject()
collection = {}
collection[obj] = True
assert collection[obj]
|
Add a regression test showing hashing error# encoding: utf-8
from marathon.models.base import MarathonObject
import unittest
class MarathonObjectTest(unittest.TestCase):
def test_hashable(self):
"""
Regression test for issue #203
MarathonObject defined __eq__ but not __hash__, meaning that in
Python 2.7 MarathonObjects are hashable, but in Python 3 they're not.
This test ensures that we are hashable in all versions of python
"""
obj = MarathonObject()
collection = {}
collection[obj] = True
assert collection[obj]
|
<commit_before><commit_msg>Add a regression test showing hashing error<commit_after># encoding: utf-8
from marathon.models.base import MarathonObject
import unittest
class MarathonObjectTest(unittest.TestCase):
def test_hashable(self):
"""
Regression test for issue #203
MarathonObject defined __eq__ but not __hash__, meaning that in
Python 2.7 MarathonObjects are hashable, but in Python 3 they're not.
This test ensures that we are hashable in all versions of python
"""
obj = MarathonObject()
collection = {}
collection[obj] = True
assert collection[obj]
|
|
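The Python 3 behaviour that the regression test above guards against is easy to reproduce with a standalone class, independent of marathon-python; a minimal sketch:

# Defining __eq__ without __hash__ sets __hash__ to None in Python 3,
# so instances cannot be used as dict keys or set members.
class WithEqOnly(object):
    def __eq__(self, other):
        return isinstance(other, WithEqOnly)

class WithEqAndHash(WithEqOnly):
    def __hash__(self):
        return 1

try:
    {WithEqOnly(): True}
except TypeError as exc:
    print('unhashable, as expected:', exc)

print({WithEqAndHash(): True})  # fine once __hash__ is defined again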
310a7fd5024e49f82504410bf40647b7c8d14207
|
tricircle/tests/unit/common/test_utils.py
|
tricircle/tests/unit/common/test_utils.py
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import exceptions
from tricircle.common import utils
class TricircleUtilsTestCase(unittest.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_len=255))
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
11, 'name', max_len=255)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'', 'name', min_len=1)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_len=255)
|
Add utils's check_string_length test case
|
Add utils's check_string_length test case
1. What is the problem
Tricircle does not have utils module's test case
2. What is the solution to the problem
Implement related test case
3. What features need to be implemented in the Tricircle
No new features
Change-Id: I42e54cfe310349578ae0605789249acbc349f5e4
|
Python
|
apache-2.0
|
stackforge/tricircle,openstack/tricircle,openstack/tricircle,stackforge/tricircle
|
Add utils's check_string_length test case
1. What is the problem
Tricircle does not have utils module's test case
2. What is the solution to the problem
Implement related test case
3. What features need to be implemented in the Tricircle
No new features
Change-Id: I42e54cfe310349578ae0605789249acbc349f5e4
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import exceptions
from tricircle.common import utils
class TricircleUtilsTestCase(unittest.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_len=255))
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
11, 'name', max_len=255)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'', 'name', min_len=1)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_len=255)
|
<commit_before><commit_msg>Add utils's check_string_length test case
1. What is the problem
Tricircle does not have utils module's test case
2. What is the solution to the problem
Implement related test case
3. What features need to be implemented in the Tricircle
No new features
Change-Id: I42e54cfe310349578ae0605789249acbc349f5e4<commit_after>
|
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import exceptions
from tricircle.common import utils
class TricircleUtilsTestCase(unittest.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_len=255))
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
11, 'name', max_len=255)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'', 'name', min_len=1)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_len=255)
|
Add utils's check_string_length test case
1. What is the problem
Tricircle does not have utils module's test case
2. What is the solution to the problem
Implement related test case
3. What features need to be implemented in the Tricircle
No new features
Change-Id: I42e54cfe310349578ae0605789249acbc349f5e4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import exceptions
from tricircle.common import utils
class TricircleUtilsTestCase(unittest.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_len=255))
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
11, 'name', max_len=255)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'', 'name', min_len=1)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_len=255)
|
<commit_before><commit_msg>Add utils's check_string_length test case
1. What is the problem
Tricircle does not have utils module's test case
2. What is the solution to the problem
Implement related test case
3. What features need to be implemented in the Tricircle
No new features
Change-Id: I42e54cfe310349578ae0605789249acbc349f5e4<commit_after>
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import exceptions
from tricircle.common import utils
class TricircleUtilsTestCase(unittest.TestCase):
def test_check_string_length(self):
self.assertIsNone(utils.check_string_length(
'test', 'name', max_len=255))
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
11, 'name', max_len=255)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'', 'name', min_len=1)
self.assertRaises(exceptions.InvalidInput,
utils.check_string_length,
'a' * 256, 'name', max_len=255)
|
|
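The test in the record above pins down the contract of utils.check_string_length. The real Tricircle implementation is not part of this record, so the sketch below is only a plausible reading of that contract, and the exception class is a local stand-in:

class InvalidInput(Exception):
    """Stand-in for tricircle.common.exceptions.InvalidInput (assumed interface)."""

def check_string_length(value, name, min_len=0, max_len=None):
    # Non-string input or an out-of-range length raises; otherwise return None.
    if not isinstance(value, str):
        raise InvalidInput('%s is not a string' % name)
    if len(value) < min_len:
        raise InvalidInput('%s must be at least %d characters long' % (name, min_len))
    if max_len is not None and len(value) > max_len:
        raise InvalidInput('%s must be at most %d characters long' % (name, max_len))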
46d7e6f8dcf9b7cd4c9de913f05d4e86ed16b497
|
migrations/versions/70c7d046881_.py
|
migrations/versions/70c7d046881_.py
|
"""Add user model
Revision ID: 70c7d046881
Revises: 19b7fe1331be
Create Date: 2013-12-07 15:30:26.169000
"""
# revision identifiers, used by Alembic.
revision = '70c7d046881'
down_revision = '19b7fe1331be'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.EmailType(length=255), nullable=False),
sa.Column('password_hash', sa.String(length=250), nullable=False),
sa.Column('user_created_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
Add db migration for user table
|
Add db migration for user table
|
Python
|
mit
|
streamr/marvin,streamr/marvin,streamr/marvin
|
Add db migration for user table
|
"""Add user model
Revision ID: 70c7d046881
Revises: 19b7fe1331be
Create Date: 2013-12-07 15:30:26.169000
"""
# revision identifiers, used by Alembic.
revision = '70c7d046881'
down_revision = '19b7fe1331be'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.EmailType(length=255), nullable=False),
sa.Column('password_hash', sa.String(length=250), nullable=False),
sa.Column('user_created_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration for user table<commit_after>
|
"""Add user model
Revision ID: 70c7d046881
Revises: 19b7fe1331be
Create Date: 2013-12-07 15:30:26.169000
"""
# revision identifiers, used by Alembic.
revision = '70c7d046881'
down_revision = '19b7fe1331be'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.EmailType(length=255), nullable=False),
sa.Column('password_hash', sa.String(length=250), nullable=False),
sa.Column('user_created_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
Add db migration for user table"""Add user model
Revision ID: 70c7d046881
Revises: 19b7fe1331be
Create Date: 2013-12-07 15:30:26.169000
"""
# revision identifiers, used by Alembic.
revision = '70c7d046881'
down_revision = '19b7fe1331be'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.EmailType(length=255), nullable=False),
sa.Column('password_hash', sa.String(length=250), nullable=False),
sa.Column('user_created_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration for user table<commit_after>"""Add user model
Revision ID: 70c7d046881
Revises: 19b7fe1331be
Create Date: 2013-12-07 15:30:26.169000
"""
# revision identifiers, used by Alembic.
revision = '70c7d046881'
down_revision = '19b7fe1331be'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=20), nullable=False),
sa.Column('email', sa.EmailType(length=255), nullable=False),
sa.Column('password_hash', sa.String(length=250), nullable=False),
sa.Column('user_created_datetime', sa.DateTime(), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('email')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('user')
### end Alembic commands ###
|
|
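For orientation, the migration in the record above corresponds roughly to the model sketched below. Two caveats: the model itself is not part of the record, and sa.EmailType is not a core SQLAlchemy type; it usually comes from sqlalchemy-utils, so generated migrations like this one typically need an extra import before they run.

import datetime

from sqlalchemy import Column, DateTime, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy_utils import EmailType  # assumed source of the EmailType column type

Base = declarative_base()

class User(Base):
    __tablename__ = 'user'

    id = Column(Integer, primary_key=True)
    username = Column(String(20), nullable=False)
    email = Column(EmailType(length=255), nullable=False, unique=True)
    password_hash = Column(String(250), nullable=False)
    user_created_datetime = Column(DateTime, default=datetime.datetime.utcnow)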
fa88f6b332d14084f89aec99c0c436ae4c36dd58
|
setup.py
|
setup.py
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown', 'docutils'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
Add docutils to the list of requirements.
|
Add docutils to the list of requirements.
Install docutils during a pip install so that rendering
reStructuredText (CTRL-r) works out of the box.
|
Python
|
bsd-3-clause
|
n8henrie/nvpy,dwu/nvpy,trankmichael/nvpy,khornberg/nvpy-gtk,yuuki0xff/nvpy,dwu/nvpy,bwillistower/nvpy,yuuki0xff/nvpy
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
Add docutils to the list of requirements.
Install docutils during a pip install so that rendering
reStructuredText (CTRL-r) works out of the box.
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown', 'docutils'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
<commit_before>import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
<commit_msg>Add docutils to the list of requirements.
Install docutils during a pip install so that rendering
reStructuredText (CTRL-r) works out of the box.<commit_after>
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown', 'docutils'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
Add docutils to the list of requirements.
Install docutils during a pip install so that rendering
reStructuredText (CTRL-r) works out of the box.import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown', 'docutils'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
<commit_before>import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
<commit_msg>Add docutils to the list of requirements.
Install docutils during a pip install so that rendering
reStructuredText (CTRL-r) works out of the box.<commit_after>import os
from setuptools import setup
from nvpy import nvpy
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "nvpy",
version = nvpy.VERSION,
author = "Charl P. Botha",
author_email = "cpbotha@vxlabs.com",
description = "A cross-platform simplenote-syncing note-taking app inspired by Notational Velocity.",
license = "BSD",
keywords = "simplenote note-taking tkinter nvalt markdown",
url = "https://github.com/cpbotha/nvpy",
packages=['nvpy'],
long_description=read('README.rst'),
install_requires = ['Markdown', 'docutils'],
entry_points = {
'gui_scripts' : ['nvpy = nvpy.nvpy:main']
},
# use MANIFEST.in file
# because package_data is ignored during sdist
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: X11 Applications",
"Environment :: MacOS X",
"Environment :: Win32 (MS Windows)",
"Topic :: Utilities",
"License :: OSI Approved :: BSD License",
],
)
|
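The commit message in the record above explains the new dependency: docutils does the reStructuredText rendering behind CTRL-r. nvpy's own rendering code is not shown here, so the snippet below is only a minimal illustration of what docutils provides:

from docutils.core import publish_parts

rst_source = "Heading\n=======\n\nSome *emphasised* reStructuredText."
html_body = publish_parts(rst_source, writer_name='html')['html_body']
print(html_body)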
b75205319bc28f53c22fea34499d34aef279df9b
|
examples/microblog.py
|
examples/microblog.py
|
from dynamic_api_client import BaseClient
class MicroBlogApi(BaseClient):
available_paths = ['POST account/signin', 'GET posts/all']
separator = '/'
def __init__(self, base_url='https://micro.blog', path='', token=''):
self.token = token
super(self.__class__, self).__init__(base_url, path, token=token)
# override method from BaseClient to inject Authorization header
def _prepare_request(self):
super(self.__class__, self)._prepare_request()
self.request.headers['Authorization'] = 'Token {}'.format(self.token)
if __name__ == '__main__':
mba = MicroBlogApi(token='')
posts = mba.posts.all.get()
print(posts.status_code, posts.reason)
print(posts.json())
|
Include a (very) simple example
|
Include a (very) simple example
|
Python
|
mit
|
andymitchhank/bessie
|
Include a (very) simple example
|
from dynamic_api_client import BaseClient
class MicroBlogApi(BaseClient):
available_paths = ['POST account/signin', 'GET posts/all']
separator = '/'
def __init__(self, base_url='https://micro.blog', path='', token=''):
self.token = token
super(self.__class__, self).__init__(base_url, path, token=token)
# override method from BaseClient to inject Authorization header
def _prepare_request(self):
super(self.__class__, self)._prepare_request()
self.request.headers['Authorization'] = 'Token {}'.format(self.token)
if __name__ == '__main__':
mba = MicroBlogApi(token='')
posts = mba.posts.all.get()
print(posts.status_code, posts.reason)
print(posts.json())
|
<commit_before><commit_msg>Include a (very) simple example<commit_after>
|
from dynamic_api_client import BaseClient
class MicroBlogApi(BaseClient):
available_paths = ['POST account/signin', 'GET posts/all']
separator = '/'
def __init__(self, base_url='https://micro.blog', path='', token=''):
self.token = token
super(self.__class__, self).__init__(base_url, path, token=token)
# override method from BaseClient to inject Authorization header
def _prepare_request(self):
super(self.__class__, self)._prepare_request()
self.request.headers['Authorization'] = 'Token {}'.format(self.token)
if __name__ == '__main__':
mba = MicroBlogApi(token='')
posts = mba.posts.all.get()
print(posts.status_code, posts.reason)
print(posts.json())
|
Include a (very) simple examplefrom dynamic_api_client import BaseClient
class MicroBlogApi(BaseClient):
available_paths = ['POST account/signin', 'GET posts/all']
separator = '/'
def __init__(self, base_url='https://micro.blog', path='', token=''):
self.token = token
super(self.__class__, self).__init__(base_url, path, token=token)
# override method from BaseClient to inject Authorization header
def _prepare_request(self):
super(self.__class__, self)._prepare_request()
self.request.headers['Authorization'] = 'Token {}'.format(self.token)
if __name__ == '__main__':
mba = MicroBlogApi(token='')
posts = mba.posts.all.get()
print(posts.status_code, posts.reason)
print(posts.json())
|
<commit_before><commit_msg>Include a (very) simple example<commit_after>from dynamic_api_client import BaseClient
class MicroBlogApi(BaseClient):
available_paths = ['POST account/signin', 'GET posts/all']
separator = '/'
def __init__(self, base_url='https://micro.blog', path='', token=''):
self.token = token
super(self.__class__, self).__init__(base_url, path, token=token)
# override method from BaseClient to inject Authorization header
def _prepare_request(self):
super(self.__class__, self)._prepare_request()
self.request.headers['Authorization'] = 'Token {}'.format(self.token)
if __name__ == '__main__':
mba = MicroBlogApi(token='')
posts = mba.posts.all.get()
print(posts.status_code, posts.reason)
print(posts.json())
|
|
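Judging from the record above, BaseClient turns declared paths into attribute chains (posts/all becomes mba.posts.all.get()). By the same reading, the other declared path would presumably be called roughly as below; the token and parameter names are invented for illustration:

mba = MicroBlogApi(token='app-token-here')

# 'POST account/signin' is declared in available_paths, so presumably:
response = mba.account.signin.post(username='alice', password='hunter2')
print(response.status_code)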
6f76a9735cd8208137e91782b233f98b406d401d
|
tests/test_simulator_main.py
|
tests/test_simulator_main.py
|
#!/usr/bin/env python3
import contextlib
import io
import nose.tools as nose
import src.simulator as sim
from unittest.mock import patch
@patch('sys.argv', [
sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1',
'--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8'])
def test_main():
"""main function should produce some output"""
out = io.StringIO()
with contextlib.redirect_stdout(out):
sim.main()
main_output = out.getvalue()
nose.assert_regexp_matches(main_output, r'\bWordAddr\b')
nose.assert_regexp_matches(main_output, r'\b0110\b')
nose.assert_regexp_matches(main_output, r'\bCache')
nose.assert_regexp_matches(main_output, r'\b01\b')
nose.assert_regexp_matches(main_output, r'\b8\s*6\b')
|
Add test for main function; bring coverage to 100%
|
Add test for main function; bring coverage to 100%
|
Python
|
mit
|
caleb531/cache-simulator
|
Add test for main function; bring coverage to 100%
|
#!/usr/bin/env python3
import contextlib
import io
import nose.tools as nose
import src.simulator as sim
from unittest.mock import patch
@patch('sys.argv', [
sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1',
'--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8'])
def test_main():
"""main function should produce some output"""
out = io.StringIO()
with contextlib.redirect_stdout(out):
sim.main()
main_output = out.getvalue()
nose.assert_regexp_matches(main_output, r'\bWordAddr\b')
nose.assert_regexp_matches(main_output, r'\b0110\b')
nose.assert_regexp_matches(main_output, r'\bCache')
nose.assert_regexp_matches(main_output, r'\b01\b')
nose.assert_regexp_matches(main_output, r'\b8\s*6\b')
|
<commit_before><commit_msg>Add test for main function; bring coverage to 100%<commit_after>
|
#!/usr/bin/env python3
import contextlib
import io
import nose.tools as nose
import src.simulator as sim
from unittest.mock import patch
@patch('sys.argv', [
sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1',
'--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8'])
def test_main():
"""main function should produce some output"""
out = io.StringIO()
with contextlib.redirect_stdout(out):
sim.main()
main_output = out.getvalue()
nose.assert_regexp_matches(main_output, r'\bWordAddr\b')
nose.assert_regexp_matches(main_output, r'\b0110\b')
nose.assert_regexp_matches(main_output, r'\bCache')
nose.assert_regexp_matches(main_output, r'\b01\b')
nose.assert_regexp_matches(main_output, r'\b8\s*6\b')
|
Add test for main function; bring coverage to 100%#!/usr/bin/env python3
import contextlib
import io
import nose.tools as nose
import src.simulator as sim
from unittest.mock import patch
@patch('sys.argv', [
sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1',
'--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8'])
def test_main():
"""main function should produce some output"""
out = io.StringIO()
with contextlib.redirect_stdout(out):
sim.main()
main_output = out.getvalue()
nose.assert_regexp_matches(main_output, r'\bWordAddr\b')
nose.assert_regexp_matches(main_output, r'\b0110\b')
nose.assert_regexp_matches(main_output, r'\bCache')
nose.assert_regexp_matches(main_output, r'\b01\b')
nose.assert_regexp_matches(main_output, r'\b8\s*6\b')
|
<commit_before><commit_msg>Add test for main function; bring coverage to 100%<commit_after>#!/usr/bin/env python3
import contextlib
import io
import nose.tools as nose
import src.simulator as sim
from unittest.mock import patch
@patch('sys.argv', [
sim.__file__, '--cache-size', '4', '--num-blocks-per-set', '1',
'--num-words-per-block', '1', '--word-addrs', '0', '8', '0', '6', '8'])
def test_main():
"""main function should produce some output"""
out = io.StringIO()
with contextlib.redirect_stdout(out):
sim.main()
main_output = out.getvalue()
nose.assert_regexp_matches(main_output, r'\bWordAddr\b')
nose.assert_regexp_matches(main_output, r'\b0110\b')
nose.assert_regexp_matches(main_output, r'\bCache')
nose.assert_regexp_matches(main_output, r'\b01\b')
nose.assert_regexp_matches(main_output, r'\b8\s*6\b')
|
|
c11e14296848ccfbaab36d540da79afc86c83b92
|
bvspca/core/migrations/0025_auto_20180202_1351.py
|
bvspca/core/migrations/0025_auto_20180202_1351.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_contentindexpage_empty_message'),
]
operations = [
migrations.AlterField(
model_name='contentindexpage',
name='empty_message',
field=models.CharField(default='Empty', max_length=200),
),
]
|
Add default message for list pages
|
Add default message for list pages
|
Python
|
mit
|
nfletton/bvspca,nfletton/bvspca,nfletton/bvspca,nfletton/bvspca
|
Add default message for list pages
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_contentindexpage_empty_message'),
]
operations = [
migrations.AlterField(
model_name='contentindexpage',
name='empty_message',
field=models.CharField(default='Empty', max_length=200),
),
]
|
<commit_before><commit_msg>Add default message for list pages<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_contentindexpage_empty_message'),
]
operations = [
migrations.AlterField(
model_name='contentindexpage',
name='empty_message',
field=models.CharField(default='Empty', max_length=200),
),
]
|
Add default message for list pages# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_contentindexpage_empty_message'),
]
operations = [
migrations.AlterField(
model_name='contentindexpage',
name='empty_message',
field=models.CharField(default='Empty', max_length=200),
),
]
|
<commit_before><commit_msg>Add default message for list pages<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-02 20:51
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0024_contentindexpage_empty_message'),
]
operations = [
migrations.AlterField(
model_name='contentindexpage',
name='empty_message',
field=models.CharField(default='Empty', max_length=200),
),
]
|
|
ddd110081d4b21da88d947702ba2e37f87bf8cb0
|
tests/food/test_suggest_restaurant.py
|
tests/food/test_suggest_restaurant.py
|
import unittest
from click.testing import CliRunner
import yoda
class TestSuggestRestaurant(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_restaurant
"""
def __init__(self, methodName="runTest"):
super(TestSuggestRestaurant, self).__init__()
self.runner = CliRunner()
def runTest(self):
# Test Restaurant Suggestion
result = self.runner.invoke(yoda.cli, ['food', 'suggest_restaurant'], input='Berlin\nJamaican')
self.assertIn("Why don't you try THIS restaurant tonight!", result.output)
self.assertIsNone(result.exception)
|
Add unit test for suggest_restaurant command.
|
Add unit test for suggest_restaurant command.
|
Python
|
mit
|
dude-pa/dude
|
Add unit test for suggest_restaurant command.
|
import unittest
from click.testing import CliRunner
import yoda
class TestSuggestRestaurant(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_restaurant
"""
def __init__(self, methodName="runTest"):
super(TestSuggestRestaurant, self).__init__()
self.runner = CliRunner()
def runTest(self):
# Test Restaurant Suggestion
result = self.runner.invoke(yoda.cli, ['food', 'suggest_restaurant'], input='Berlin\nJamaican')
self.assertIn("Why don't you try THIS restaurant tonight!", result.output)
self.assertIsNone(result.exception)
|
<commit_before><commit_msg>Add unit test for suggest_restaurant command.<commit_after>
|
import unittest
from click.testing import CliRunner
import yoda
class TestSuggestRestaurant(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_restaurant
"""
def __init__(self, methodName="runTest"):
super(TestSuggestRestaurant, self).__init__()
self.runner = CliRunner()
def runTest(self):
# Test Restaurant Suggestion
result = self.runner.invoke(yoda.cli, ['food', 'suggest_restaurant'], input='Berlin\nJamaican')
self.assertIn("Why don't you try THIS restaurant tonight!", result.output)
self.assertIsNone(result.exception)
|
Add unit test for suggest_restaurant command.import unittest
from click.testing import CliRunner
import yoda
class TestSuggestRestaurant(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_restaurant
"""
def __init__(self, methodName="runTest"):
super(TestSuggestRestaurant, self).__init__()
self.runner = CliRunner()
def runTest(self):
# Test Restaurant Suggestion
result = self.runner.invoke(yoda.cli, ['food', 'suggest_restaurant'], input='Berlin\nJamaican')
self.assertIn("Why don't you try THIS restaurant tonight!", result.output)
self.assertIsNone(result.exception)
|
<commit_before><commit_msg>Add unit test for suggest_restaurant command.<commit_after>import unittest
from click.testing import CliRunner
import yoda
class TestSuggestRestaurant(unittest.TestCase):
"""
Test for the following commands:
| Module: food
| command: suggest_restaurant
"""
def __init__(self, methodName="runTest"):
super(TestSuggestRestaurant, self).__init__()
self.runner = CliRunner()
def runTest(self):
# Test Restaurant Suggestion
result = self.runner.invoke(yoda.cli, ['food', 'suggest_restaurant'], input='Berlin\nJamaican')
self.assertIn("Why don't you try THIS restaurant tonight!", result.output)
self.assertIsNone(result.exception)
|
|
a59cfbbbfd0732c58b9e2373d45118d01f7fcb90
|
website/tests/test_jobs.py
|
website/tests/test_jobs.py
|
import time
from database import update, utc_now, db
from database_testing import DatabaseTest
from jobs import hard_delete_expired_datasets
from models import User, UsersMutationsDataset
from test_models.test_dataset import create_test_dataset
class JobTest(DatabaseTest):
def test_hard_delete_dataset(self):
user = User('user@domain', 'password')
# let's create five datasets
datasets = []
for _ in range(5):
datasets.append(create_test_dataset(owner=user))
# and make two of them expired
for dataset in datasets[:2]:
update(dataset, store_until=utc_now())
db.session.commit()
time.sleep(2)
removed_cnt = hard_delete_expired_datasets()
# two were removed, three remained
assert removed_cnt == 2
assert UsersMutationsDataset.query.count() == 3
|
Add test for hard delete job
|
Add test for hard delete job
|
Python
|
lgpl-2.1
|
reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/ActiveDriverDB
|
Add test for hard delete job
|
import time
from database import update, utc_now, db
from database_testing import DatabaseTest
from jobs import hard_delete_expired_datasets
from models import User, UsersMutationsDataset
from test_models.test_dataset import create_test_dataset
class JobTest(DatabaseTest):
def test_hard_delete_dataset(self):
user = User('user@domain', 'password')
# let's create five datasets
datasets = []
for _ in range(5):
datasets.append(create_test_dataset(owner=user))
# and make two of them expired
for dataset in datasets[:2]:
update(dataset, store_until=utc_now())
db.session.commit()
time.sleep(2)
removed_cnt = hard_delete_expired_datasets()
# two were removed, three remained
assert removed_cnt == 2
assert UsersMutationsDataset.query.count() == 3
|
<commit_before><commit_msg>Add test for hard delete job<commit_after>
|
import time
from database import update, utc_now, db
from database_testing import DatabaseTest
from jobs import hard_delete_expired_datasets
from models import User, UsersMutationsDataset
from test_models.test_dataset import create_test_dataset
class JobTest(DatabaseTest):
def test_hard_delete_dataset(self):
user = User('user@domain', 'password')
# let's create five datasets
datasets = []
for _ in range(5):
datasets.append(create_test_dataset(owner=user))
# and make two of them expired
for dataset in datasets[:2]:
update(dataset, store_until=utc_now())
db.session.commit()
time.sleep(2)
removed_cnt = hard_delete_expired_datasets()
# two were removed, three remained
assert removed_cnt == 2
assert UsersMutationsDataset.query.count() == 3
|
Add test for hard delete jobimport time
from database import update, utc_now, db
from database_testing import DatabaseTest
from jobs import hard_delete_expired_datasets
from models import User, UsersMutationsDataset
from test_models.test_dataset import create_test_dataset
class JobTest(DatabaseTest):
def test_hard_delete_dataset(self):
user = User('user@domain', 'password')
# let's create five datasets
datasets = []
for _ in range(5):
datasets.append(create_test_dataset(owner=user))
# and make two of them expired
for dataset in datasets[:2]:
update(dataset, store_until=utc_now())
db.session.commit()
time.sleep(2)
removed_cnt = hard_delete_expired_datasets()
# two were removed, three remained
assert removed_cnt == 2
assert UsersMutationsDataset.query.count() == 3
|
<commit_before><commit_msg>Add test for hard delete job<commit_after>import time
from database import update, utc_now, db
from database_testing import DatabaseTest
from jobs import hard_delete_expired_datasets
from models import User, UsersMutationsDataset
from test_models.test_dataset import create_test_dataset
class JobTest(DatabaseTest):
def test_hard_delete_dataset(self):
user = User('user@domain', 'password')
# let's create five datasets
datasets = []
for _ in range(5):
datasets.append(create_test_dataset(owner=user))
# and make two of them expired
for dataset in datasets[:2]:
update(dataset, store_until=utc_now())
db.session.commit()
time.sleep(2)
removed_cnt = hard_delete_expired_datasets()
# two were removed, three remained
assert removed_cnt == 2
assert UsersMutationsDataset.query.count() == 3
|
|
ec2861e077ec4b4084b60df085baf41caf9e15d4
|
readthedocs/rtd_tests/tests/test_oauth.py
|
readthedocs/rtd_tests/tests/test_oauth.py
|
from django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
Test for make_github_projec and make_github_organization
|
Test for make_github_projec and make_github_organization
|
Python
|
mit
|
wanghaven/readthedocs.org,asampat3090/readthedocs.org,davidfischer/readthedocs.org,sunnyzwh/readthedocs.org,nikolas/readthedocs.org,laplaceliu/readthedocs.org,hach-que/readthedocs.org,nikolas/readthedocs.org,raven47git/readthedocs.org,hach-que/readthedocs.org,cgourlay/readthedocs.org,royalwang/readthedocs.org,laplaceliu/readthedocs.org,sid-kap/readthedocs.org,davidfischer/readthedocs.org,davidfischer/readthedocs.org,tddv/readthedocs.org,takluyver/readthedocs.org,asampat3090/readthedocs.org,agjohnson/readthedocs.org,singingwolfboy/readthedocs.org,mrshoki/readthedocs.org,techtonik/readthedocs.org,mhils/readthedocs.org,raven47git/readthedocs.org,wijerasa/readthedocs.org,GovReady/readthedocs.org,Carreau/readthedocs.org,LukasBoersma/readthedocs.org,atsuyim/readthedocs.org,attakei/readthedocs-oauth,SteveViss/readthedocs.org,d0ugal/readthedocs.org,royalwang/readthedocs.org,KamranMackey/readthedocs.org,atsuyim/readthedocs.org,emawind84/readthedocs.org,kdkeyser/readthedocs.org,titiushko/readthedocs.org,dirn/readthedocs.org,rtfd/readthedocs.org,espdev/readthedocs.org,istresearch/readthedocs.org,wijerasa/readthedocs.org,SteveViss/readthedocs.org,wijerasa/readthedocs.org,VishvajitP/readthedocs.org,espdev/readthedocs.org,GovReady/readthedocs.org,sils1297/readthedocs.org,Carreau/readthedocs.org,LukasBoersma/readthedocs.org,techtonik/readthedocs.org,wanghaven/readthedocs.org,asampat3090/readthedocs.org,mhils/readthedocs.org,michaelmcandrew/readthedocs.org,d0ugal/readthedocs.org,takluyver/readthedocs.org,pombredanne/readthedocs.org,fujita-shintaro/readthedocs.org,mrshoki/readthedocs.org,kenshinthebattosai/readthedocs.org,jerel/readthedocs.org,d0ugal/readthedocs.org,espdev/readthedocs.org,stevepiercy/readthedocs.org,rtfd/readthedocs.org,d0ugal/readthedocs.org,kdkeyser/readthedocs.org,cgourlay/readthedocs.org,agjohnson/readthedocs.org,tddv/readthedocs.org,kenwang76/readthedocs.org,soulshake/readthedocs.org,kenwang76/readthedocs.org,pombredanne/readthedocs.org,Carreau/readthedocs.org,safwanrahman/readthedocs.org,kenshinthebattosai/readthedocs.org,kenwang76/readthedocs.org,sils1297/readthedocs.org,cgourlay/readthedocs.org,gjtorikian/readthedocs.org,techtonik/readthedocs.org,hach-que/readthedocs.org,dirn/readthedocs.org,singingwolfboy/readthedocs.org,dirn/readthedocs.org,cgourlay/readthedocs.org,Tazer/readthedocs.org,sils1297/readthedocs.org,sunnyzwh/readthedocs.org,agjohnson/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,clarkperkins/readthedocs.org,fujita-shintaro/readthedocs.org,kdkeyser/readthedocs.org,takluyver/readthedocs.org,emawind84/readthedocs.org,techtonik/readthedocs.org,sunnyzwh/readthedocs.org,VishvajitP/readthedocs.org,LukasBoersma/readthedocs.org,sid-kap/readthedocs.org,royalwang/readthedocs.org,gjtorikian/readthedocs.org,KamranMackey/readthedocs.org,dirn/readthedocs.org,gjtorikian/readthedocs.org,safwanrahman/readthedocs.org,VishvajitP/readthedocs.org,CedarLogic/readthedocs.org,soulshake/readthedocs.org,safwanrahman/readthedocs.org,rtfd/readthedocs.org,asampat3090/readthedocs.org,stevepiercy/readthedocs.org,michaelmcandrew/readthedocs.org,Tazer/readthedocs.org,clarkperkins/readthedocs.org,wanghaven/readthedocs.org,KamranMackey/readthedocs.org,nikolas/readthedocs.org,royalwang/readthedocs.org,kenwang76/readthedocs.org,singingwolfboy/readthedocs.org,gjtorikian/readthedocs.org,jerel/readthedocs.org,jerel/readthedocs.org,atsuyim/readthedocs.org,stevepiercy/readthedocs.org,fujita-shintaro/readthedocs.org,takluyver/readthedocs.org,sils1297/readthedocs.org,atsuyim/readthedocs.org,mrshoki/readthedocs.org,attakei/readthedocs-oauth,SteveViss/readthedocs.org,SteveViss/readthedocs.org,titiushko/readthedocs.org,mhils/readthedocs.org,CedarLogic/readthedocs.org,hach-que/readthedocs.org,attakei/readthedocs-oauth,wijerasa/readthedocs.org,raven47git/readthedocs.org,raven47git/readthedocs.org,sunnyzwh/readthedocs.org,soulshake/readthedocs.org,emawind84/readthedocs.org,KamranMackey/readthedocs.org,tddv/readthedocs.org,Carreau/readthedocs.org,soulshake/readthedocs.org,espdev/readthedocs.org,attakei/readthedocs-oauth,kenshinthebattosai/readthedocs.org,mhils/readthedocs.org,Tazer/readthedocs.org,kenshinthebattosai/readthedocs.org,istresearch/readthedocs.org,istresearch/readthedocs.org,rtfd/readthedocs.org,GovReady/readthedocs.org,kdkeyser/readthedocs.org,CedarLogic/readthedocs.org,LukasBoersma/readthedocs.org,CedarLogic/readthedocs.org,nikolas/readthedocs.org,safwanrahman/readthedocs.org,GovReady/readthedocs.org,istresearch/readthedocs.org,sid-kap/readthedocs.org,stevepiercy/readthedocs.org,michaelmcandrew/readthedocs.org,laplaceliu/readthedocs.org,sid-kap/readthedocs.org,clarkperkins/readthedocs.org,agjohnson/readthedocs.org,jerel/readthedocs.org,emawind84/readthedocs.org,clarkperkins/readthedocs.org,fujita-shintaro/readthedocs.org,Tazer/readthedocs.org,titiushko/readthedocs.org,michaelmcandrew/readthedocs.org,VishvajitP/readthedocs.org,singingwolfboy/readthedocs.org,mrshoki/readthedocs.org,espdev/readthedocs.org,wanghaven/readthedocs.org,titiushko/readthedocs.org
|
Test for make_github_projec and make_github_organization
|
from django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
<commit_before><commit_msg>Test for make_github_projec and make_github_organization<commit_after>
|
from django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
Test for make_github_projec and make_github_organizationfrom django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
<commit_before><commit_msg>Test for make_github_projec and make_github_organization<commit_after>from django.test import TestCase
from django.contrib.auth.models import User
from allauth.socialaccount.models import SocialToken
from projects.models import Project
from oauth.utils import make_github_project, make_github_organization, import_github
from oauth.models import GithubOrganization, GithubProject
class RedirectOauth(TestCase):
fixtures = ["eric", "test_data"]
def setUp(self):
self.client.login(username='eric', password='test')
self.user = User.objects.get(pk=1)
self.project = Project.objects.get(slug='pip')
self.org = GithubOrganization()
self.privacy = self.project.version_privacy_level
def test_make_github_project_pass(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": False,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsInstance(github_project, GithubProject)
def test_make_github_project_fail(self):
repo_json = {
"name": "",
"full_name": "",
"description": "",
"git_url": "",
"private": True,
"ssh_url": "",
"html_url": "",
}
github_project = make_github_project(user=self.user, org=self.org, privacy=self.privacy, repo_json=repo_json)
self.assertIsNone(github_project)
def test_make_github_organization(self):
org_json = {
"html_url": "",
"name": "",
"email": "",
"login": "",
}
org = make_github_organization(self.user, org_json)
self.assertIsInstance(org, GithubOrganization)
def test_import_github_with_no_token(self):
github_connected = import_github(self.user, sync=True)
self.assertEqual(github_connected, False)
|
|
73509b5cc07bbf4610b9860cadd1d09e529b710d
|
create_dummy_data.py
|
create_dummy_data.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file fills the folder /data with dummy files (necessary for development
# purposes while we don't have real data yet)
# -----------------------------------------------------------------------------
import numpy as np
# Get the number of people
n_people = 0
with open('people.csv', 'r') as f:
n_people = len(f.readlines())
# Create a dummy file for each person
for i in range(n_people):
with open('./data/{}.csv'.format(i), 'w+') as f:
for j in range(n_people):
number = int(np.random.uniform(0,1) < 0.1)
if j==i:
number = 0
f.write('{}, {}\n'.format(j, number))
|
Create dummy files for /data
|
Create dummy files for /data
|
Python
|
mit
|
MartinThoma/akademie-graph,MartinThoma/akademie-graph,MartinThoma/akademie-graph
|
Create dummy files for /data
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file fills the folder /data with dummy files (necessary for development
# purposes while we don't have real data yet)
# -----------------------------------------------------------------------------
import numpy as np
# Get the number of people
n_people = 0
with open('people.csv', 'r') as f:
n_people = len(f.readlines())
# Create a dummy file for each person
for i in range(n_people):
with open('./data/{}.csv'.format(i), 'w+') as f:
for j in range(n_people):
number = int(np.random.uniform(0,1) < 0.1)
if j==i:
number = 0
f.write('{}, {}\n'.format(j, number))
|
<commit_before><commit_msg>Create dummy files for /data<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file fills the folder /data with dummy files (necessary for development
# purposes while we don't have real data yet)
# -----------------------------------------------------------------------------
import numpy as np
# Get the number of people
n_people = 0
with open('people.csv', 'r') as f:
n_people = len(f.readlines())
# Create a dummy file for each person
for i in range(n_people):
with open('./data/{}.csv'.format(i), 'w+') as f:
for j in range(n_people):
number = int(np.random.uniform(0,1) < 0.1)
if j==i:
number = 0
f.write('{}, {}\n'.format(j, number))
|
Create dummy files for /data#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file fills the folder /data with dummy files (necessary for development
# purposes while we don't have real data yet)
# -----------------------------------------------------------------------------
import numpy as np
# Get the number of people
n_people = 0
with open('people.csv', 'r') as f:
n_people = len(f.readlines())
# Create a dummy file for each person
for i in range(n_people):
with open('./data/{}.csv'.format(i), 'w+') as f:
for j in range(n_people):
number = int(np.random.uniform(0,1) < 0.1)
if j==i:
number = 0
f.write('{}, {}\n'.format(j, number))
|
<commit_before><commit_msg>Create dummy files for /data<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# EXPLANATION:
# This file fills the folder /data with dummy files (necessary for development
# purposes while we don't have real data yet)
# -----------------------------------------------------------------------------
import numpy as np
# Get the number of people
n_people = 0
with open('people.csv', 'r') as f:
n_people = len(f.readlines())
# Create a dummy file for each person
for i in range(n_people):
with open('./data/{}.csv'.format(i), 'w+') as f:
for j in range(n_people):
number = int(np.random.uniform(0,1) < 0.1)
if j==i:
number = 0
f.write('{}, {}\n'.format(j, number))
|
|
462205a8dde700b4d5f36225bbe5f9d15b59832b
|
Climate_Police/tests/test_pollution_map.py
|
Climate_Police/tests/test_pollution_map.py
|
#run the test with default values of df, state and year
import unittest
from pollution_map import pollution_map
import pandas as pd
df = pd.read_csv("../data/pollution_us_2000_2016.csv")
source = 'CO' # options: NO2, O3, SO2 and CO
year = '2008' # options: 2000 - 2016
option = 'Mean' # options: Mean, AQI, 1st Max Value
class TestPlot(unittest.TestCase):
def testPlotPollutants(self):
fig, flag = pollution_map(df, source, year, option)
expected_explanation="Pollution map plotted."
self.assertTrue(flag, expected_explanation)
if __name__ == '__main__':
unittest.main()
|
Add unit test for pollution_map
|
Add unit test for pollution_map
|
Python
|
mit
|
abhisheksugam/Climate_Police
|
Add unit test for pollution_map
|
#run the test with default values of df, state and year
import unittest
from pollution_map import pollution_map
import pandas as pd
df = pd.read_csv("../data/pollution_us_2000_2016.csv")
source = 'CO' # options: NO2, O3, SO2 and CO
year = '2008' # options: 2000 - 2016
option = 'Mean' # options: Mean, AQI, 1st Max Value
class TestPlot(unittest.TestCase):
def testPlotPollutants(self):
fig, flag = pollution_map(df, source, year, option)
expected_explanation="Pollution map plotted."
self.assertTrue(flag, expected_explanation)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for pollution_map<commit_after>
|
#run the test with default values of df, state and year
import unittest
from pollution_map import pollution_map
import pandas as pd
df = pd.read_csv("../data/pollution_us_2000_2016.csv")
source = 'CO' # options: NO2, O3, SO2 and CO
year = '2008' # options: 2000 - 2016
option = 'Mean' # options: Mean, AQI, 1st Max Value
class TestPlot(unittest.TestCase):
def testPlotPollutants(self):
fig, flag = pollution_map(df, source, year, option)
expected_explanation="Pollution map plotted."
self.assertTrue(flag, expected_explanation)
if __name__ == '__main__':
unittest.main()
|
Add unit test for pollution_map#run the test with default values of df, state and year
import unittest
from pollution_map import pollution_map
import pandas as pd
df = pd.read_csv("../data/pollution_us_2000_2016.csv")
source = 'CO' # options: NO2, O3, SO2 and CO
year = '2008' # options: 2000 - 2016
option = 'Mean' # options: Mean, AQI, 1st Max Value
class TestPlot(unittest.TestCase):
def testPlotPollutants(self):
fig, flag = pollution_map(df, source, year, option)
expected_explanation="Pollution map plotted."
self.assertTrue(flag, expected_explanation)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add unit test for pollution_map<commit_after>#run the test with default values of df, state and year
import unittest
from pollution_map import pollution_map
import pandas as pd
df = pd.read_csv("../data/pollution_us_2000_2016.csv")
source = 'CO' # options: NO2, O3, SO2 and CO
year = '2008' # options: 2000 - 2016
option = 'Mean' # options: Mean, AQI, 1st Max Value
class TestPlot(unittest.TestCase):
def testPlotPollutants(self):
fig, flag = pollution_map(df, source, year, option)
expected_explanation="Pollution map plotted."
self.assertTrue(flag, expected_explanation)
if __name__ == '__main__':
unittest.main()
|
|
4392e56f520cd50454a4a8e804f7382276ee3c3d
|
valid_options/asset_class.py
|
valid_options/asset_class.py
|
from enum import Enum
class AssetClass(Enum):
CASH_EQUIVALENTS = "Cash Equivalents"
COMMODITIES = "Commodities"
EQUITIES = "Equities"
FIXED_INCOME = "Fixed Income"
REAL_ESTATE = "Real Estate"
NONE = "None"
|
Create enum for asset classes
|
Create enum for asset classes
|
Python
|
mit
|
cmvandrevala/finance_scripts,cmvandrevala/finance_scripts,cmvandrevala/finance_scripts
|
Create enum for asset classes
|
from enum import Enum
class AssetClass(Enum):
CASH_EQUIVALENTS = "Cash Equivalents"
COMMODITIES = "Commodities"
EQUITIES = "Equities"
FIXED_INCOME = "Fixed Income"
REAL_ESTATE = "Real Estate"
NONE = "None"
|
<commit_before><commit_msg>Create enum for asset classes<commit_after>
|
from enum import Enum
class AssetClass(Enum):
CASH_EQUIVALENTS = "Cash Equivalents"
COMMODITIES = "Commodities"
EQUITIES = "Equities"
FIXED_INCOME = "Fixed Income"
REAL_ESTATE = "Real Estate"
NONE = "None"
|
Create enum for asset classesfrom enum import Enum
class AssetClass(Enum):
CASH_EQUIVALENTS = "Cash Equivalents"
COMMODITIES = "Commodities"
EQUITIES = "Equities"
FIXED_INCOME = "Fixed Income"
REAL_ESTATE = "Real Estate"
NONE = "None"
|
<commit_before><commit_msg>Create enum for asset classes<commit_after>from enum import Enum
class AssetClass(Enum):
CASH_EQUIVALENTS = "Cash Equivalents"
COMMODITIES = "Commodities"
EQUITIES = "Equities"
FIXED_INCOME = "Fixed Income"
REAL_ESTATE = "Real Estate"
NONE = "None"
|
|
dd10599a0625e3ab53d2e84612f9162a7e9dbbaf
|
scripts/numba_cuda.py
|
scripts/numba_cuda.py
|
# install nvidia-cuda-toolkit into the OS
# conda install numba
# conda install cudatoolkit -- otherwise will error out
import numpy as np
from numba import vectorize
from time import perf_counter
@vectorize(['float32(float32, float32)'], target='cuda')
def add_by_gpu(a, b):
return a + b
@vectorize(['float32(float32, float32)'], target='cpu')
def add_by_cpu(a, b):
return a + b
def timeit(func, *args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
end = perf_counter()
return end-start, result
if __name__ == '__main__':
# Init
N = (1000, 1000)
A = np.ones(N, dtype=np.float32)
B = np.ones(A.shape, dtype=A.dtype)
C = np.empty_like(A, dtype=A.dtype)
# CPU
t, C = timeit(add_by_cpu, A, B)
print(C)
print('CPU time', t)
# GPU
t, C = timeit(add_by_gpu, A, B)
print(C)
print('GPU time', t)
|
Add script to test cuda via numba
|
Add script to test cuda via numba
|
Python
|
mit
|
neurite/debian-setup,neurite/debian-setup
|
Add script to test cuda via numba
|
# install nvidia-cuda-toolkit into the OS
# conda install numba
# conda install cudatoolkit -- otherwise will error out
import numpy as np
from numba import vectorize
from time import perf_counter
@vectorize(['float32(float32, float32)'], target='cuda')
def add_by_gpu(a, b):
return a + b
@vectorize(['float32(float32, float32)'], target='cpu')
def add_by_cpu(a, b):
return a + b
def timeit(func, *args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
end = perf_counter()
return end-start, result
if __name__ == '__main__':
# Init
N = (1000, 1000)
A = np.ones(N, dtype=np.float32)
B = np.ones(A.shape, dtype=A.dtype)
C = np.empty_like(A, dtype=A.dtype)
# CPU
t, C = timeit(add_by_cpu, A, B)
print(C)
print('CPU time', t)
# GPU
t, C = timeit(add_by_gpu, A, B)
print(C)
print('GPU time', t)
|
<commit_before><commit_msg>Add script to test cuda via numba<commit_after>
|
# install nvidia-cuda-toolkit into the OS
# conda install numba
# conda install cudatoolkit -- otherwise will error out
import numpy as np
from numba import vectorize
from time import perf_counter
@vectorize(['float32(float32, float32)'], target='cuda')
def add_by_gpu(a, b):
return a + b
@vectorize(['float32(float32, float32)'], target='cpu')
def add_by_cpu(a, b):
return a + b
def timeit(func, *args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
end = perf_counter()
return end-start, result
if __name__ == '__main__':
# Init
N = (1000, 1000)
A = np.ones(N, dtype=np.float32)
B = np.ones(A.shape, dtype=A.dtype)
C = np.empty_like(A, dtype=A.dtype)
# CPU
t, C = timeit(add_by_cpu, A, B)
print(C)
print('CPU time', t)
# GPU
t, C = timeit(add_by_gpu, A, B)
print(C)
print('GPU time', t)
|
Add script to test cuda via numba# install nvidia-cuda-toolkit into the OS
# conda install numba
# conda install cudatoolkit -- otherwise will error out
import numpy as np
from numba import vectorize
from time import perf_counter
@vectorize(['float32(float32, float32)'], target='cuda')
def add_by_gpu(a, b):
return a + b
@vectorize(['float32(float32, float32)'], target='cpu')
def add_by_cpu(a, b):
return a + b
def timeit(func, *args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
end = perf_counter()
return end-start, result
if __name__ == '__main__':
# Init
N = (1000, 1000)
A = np.ones(N, dtype=np.float32)
B = np.ones(A.shape, dtype=A.dtype)
C = np.empty_like(A, dtype=A.dtype)
# CPU
t, C = timeit(add_by_cpu, A, B)
print(C)
print('CPU time', t)
# GPU
t, C = timeit(add_by_gpu, A, B)
print(C)
print('GPU time', t)
|
<commit_before><commit_msg>Add script to test cuda via numba<commit_after># install nvidia-cuda-toolkit into the OS
# conda install numba
# conda install cudatoolkit -- otherwise will error out
import numpy as np
from numba import vectorize
from time import perf_counter
@vectorize(['float32(float32, float32)'], target='cuda')
def add_by_gpu(a, b):
return a + b
@vectorize(['float32(float32, float32)'], target='cpu')
def add_by_cpu(a, b):
return a + b
def timeit(func, *args, **kwargs):
start = perf_counter()
result = func(*args, **kwargs)
end = perf_counter()
return end-start, result
if __name__ == '__main__':
# Init
N = (1000, 1000)
A = np.ones(N, dtype=np.float32)
B = np.ones(A.shape, dtype=A.dtype)
C = np.empty_like(A, dtype=A.dtype)
# CPU
t, C = timeit(add_by_cpu, A, B)
print(C)
print('CPU time', t)
# GPU
t, C = timeit(add_by_gpu, A, B)
print(C)
print('GPU time', t)
|
|
90b3e60e52ff2f442b2e77e1a8cdf941127a09e0
|
candidates/migrations/0003_create_user_terms_agreements.py
|
candidates/migrations/0003_create_user_terms_agreements.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_user_terms_agreements(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserTermsAgreement = apps.get_model('candidates', 'UserTermsAgreement')
for u in User.objects.all():
UserTermsAgreement.objects.get_or_create(user=u)
class Migration(migrations.Migration):
dependencies = [
('candidates', '0002_usertermsagreement'),
]
operations = [
migrations.RunPython(
create_user_terms_agreements
)
]
|
Create a UserTermsAgreement object for every existing User
|
Create a UserTermsAgreement object for every existing User
|
Python
|
agpl-3.0
|
neavouli/yournextrepresentative,mysociety/yournextrepresentative,mysociety/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,YoQuieroSaber/yournextrepresentative,neavouli/yournextrepresentative,openstate/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextrepresentative,YoQuieroSaber/yournextrepresentative,DemocracyClub/yournextrepresentative,datamade/yournextmp-popit,DemocracyClub/yournextrepresentative,mysociety/yournextrepresentative,openstate/yournextrepresentative,openstate/yournextrepresentative,YoQuieroSaber/yournextrepresentative,openstate/yournextrepresentative,datamade/yournextmp-popit,neavouli/yournextrepresentative,mysociety/yournextmp-popit,neavouli/yournextrepresentative,datamade/yournextmp-popit,datamade/yournextmp-popit,YoQuieroSaber/yournextrepresentative,YoQuieroSaber/yournextrepresentative,mysociety/yournextmp-popit,mysociety/yournextmp-popit,DemocracyClub/yournextrepresentative
|
Create a UserTermsAgreement object for every existing User
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_user_terms_agreements(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserTermsAgreement = apps.get_model('candidates', 'UserTermsAgreement')
for u in User.objects.all():
UserTermsAgreement.objects.get_or_create(user=u)
class Migration(migrations.Migration):
dependencies = [
('candidates', '0002_usertermsagreement'),
]
operations = [
migrations.RunPython(
create_user_terms_agreements
)
]
|
<commit_before><commit_msg>Create a UserTermsAgreement object for every existing User<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_user_terms_agreements(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserTermsAgreement = apps.get_model('candidates', 'UserTermsAgreement')
for u in User.objects.all():
UserTermsAgreement.objects.get_or_create(user=u)
class Migration(migrations.Migration):
dependencies = [
('candidates', '0002_usertermsagreement'),
]
operations = [
migrations.RunPython(
create_user_terms_agreements
)
]
|
Create a UserTermsAgreement object for every existing User# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_user_terms_agreements(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserTermsAgreement = apps.get_model('candidates', 'UserTermsAgreement')
for u in User.objects.all():
UserTermsAgreement.objects.get_or_create(user=u)
class Migration(migrations.Migration):
dependencies = [
('candidates', '0002_usertermsagreement'),
]
operations = [
migrations.RunPython(
create_user_terms_agreements
)
]
|
<commit_before><commit_msg>Create a UserTermsAgreement object for every existing User<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
def create_user_terms_agreements(apps, schema_editor):
User = apps.get_model('auth', 'User')
UserTermsAgreement = apps.get_model('candidates', 'UserTermsAgreement')
for u in User.objects.all():
UserTermsAgreement.objects.get_or_create(user=u)
class Migration(migrations.Migration):
dependencies = [
('candidates', '0002_usertermsagreement'),
]
operations = [
migrations.RunPython(
create_user_terms_agreements
)
]
|
|
6b2896c9d31da924eb4f371e1f477b384056bd58
|
log-importer.py
|
log-importer.py
|
# Copyright (c) Weasyl LLC
# See COPYING for details.
import argparse
import datetime
import os.path
import re
import elastirc
from whoosh import index
line_pattern = re.compile(
r'(?P<time>[0-9:]{8}) (?P<formatted>'
r'\(-\) (?P<actor>[^ ]+?) '
r'(?P<action>joined|parted|quit'
r'|was kicked by (?P<kicker>[^ ]+?)'
r'|changed nick from (?P<oldName>[^ ]+?)'
r'|changed topic to (?P<topic>.*)'
r'|set mode .+)'
r'(?: \((?P<reason>.*)\))?'
r'|<(?P<message_actor>[^>]+?)> (?P<message>.*)'
r'|\* (?P<emote_actor>[^ ]+?) (?P<emote>.*)'
r')'
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create-index', default=False, action='store_true')
parser.add_argument('index')
parser.add_argument('infiles', nargs='*', type=argparse.FileType('r'))
args = parser.parse_args()
if args.create_index:
if not os.path.exists(args.index):
os.makedirs(args.index)
ix = index.create_in(args.index, elastirc.whooshSchema)
else:
ix = index.open_dir(args.index)
writer = ix.writer()
for infile in args.infiles:
basename = os.path.basename(infile.name)
print 'indexing', basename
channel, _, date = basename.rpartition('.')
channel = channel.decode('utf-8')
for line in infile:
line = line.decode('utf-8')
groups = line_pattern.match(line).groupdict()
if groups['message_actor']:
doc = {'actor': groups['message_actor'], 'message': groups['message']}
elif groups['emote_actor']:
doc = {'actor': groups['emote_actor'], 'message': groups['emote']}
else:
doc = {}
for key in ['actor', 'kicker', 'oldName', 'topic', 'reason']:
if groups[key]:
doc[key] = groups[key]
doc['formatted'] = groups['formatted']
doc['channel'] = channel
doc['receivedAt'] = datetime.datetime.strptime(
'%sT%s' % (date, groups['time']), '%Y-%m-%dT%H:%M:%S')
writer.add_document(**doc)
writer.commit()
main()
|
Add a log importer for logs that need indexing.
|
Add a log importer for logs that need indexing.
|
Python
|
isc
|
Weasyl/elastirc
|
Add a log importer for logs that need indexing.
|
# Copyright (c) Weasyl LLC
# See COPYING for details.
import argparse
import datetime
import os.path
import re
import elastirc
from whoosh import index
line_pattern = re.compile(
r'(?P<time>[0-9:]{8}) (?P<formatted>'
r'\(-\) (?P<actor>[^ ]+?) '
r'(?P<action>joined|parted|quit'
r'|was kicked by (?P<kicker>[^ ]+?)'
r'|changed nick from (?P<oldName>[^ ]+?)'
r'|changed topic to (?P<topic>.*)'
r'|set mode .+)'
r'(?: \((?P<reason>.*)\))?'
r'|<(?P<message_actor>[^>]+?)> (?P<message>.*)'
r'|\* (?P<emote_actor>[^ ]+?) (?P<emote>.*)'
r')'
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create-index', default=False, action='store_true')
parser.add_argument('index')
parser.add_argument('infiles', nargs='*', type=argparse.FileType('r'))
args = parser.parse_args()
if args.create_index:
if not os.path.exists(args.index):
os.makedirs(args.index)
ix = index.create_in(args.index, elastirc.whooshSchema)
else:
ix = index.open_dir(args.index)
writer = ix.writer()
for infile in args.infiles:
basename = os.path.basename(infile.name)
print 'indexing', basename
channel, _, date = basename.rpartition('.')
channel = channel.decode('utf-8')
for line in infile:
line = line.decode('utf-8')
groups = line_pattern.match(line).groupdict()
if groups['message_actor']:
doc = {'actor': groups['message_actor'], 'message': groups['message']}
elif groups['emote_actor']:
doc = {'actor': groups['emote_actor'], 'message': groups['emote']}
else:
doc = {}
for key in ['actor', 'kicker', 'oldName', 'topic', 'reason']:
if groups[key]:
doc[key] = groups[key]
doc['formatted'] = groups['formatted']
doc['channel'] = channel
doc['receivedAt'] = datetime.datetime.strptime(
'%sT%s' % (date, groups['time']), '%Y-%m-%dT%H:%M:%S')
writer.add_document(**doc)
writer.commit()
main()
|
<commit_before><commit_msg>Add a log importer for logs that need indexing.<commit_after>
|
# Copyright (c) Weasyl LLC
# See COPYING for details.
import argparse
import datetime
import os.path
import re
import elastirc
from whoosh import index
line_pattern = re.compile(
r'(?P<time>[0-9:]{8}) (?P<formatted>'
r'\(-\) (?P<actor>[^ ]+?) '
r'(?P<action>joined|parted|quit'
r'|was kicked by (?P<kicker>[^ ]+?)'
r'|changed nick from (?P<oldName>[^ ]+?)'
r'|changed topic to (?P<topic>.*)'
r'|set mode .+)'
r'(?: \((?P<reason>.*)\))?'
r'|<(?P<message_actor>[^>]+?)> (?P<message>.*)'
r'|\* (?P<emote_actor>[^ ]+?) (?P<emote>.*)'
r')'
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create-index', default=False, action='store_true')
parser.add_argument('index')
parser.add_argument('infiles', nargs='*', type=argparse.FileType('r'))
args = parser.parse_args()
if args.create_index:
if not os.path.exists(args.index):
os.makedirs(args.index)
ix = index.create_in(args.index, elastirc.whooshSchema)
else:
ix = index.open_dir(args.index)
writer = ix.writer()
for infile in args.infiles:
basename = os.path.basename(infile.name)
print 'indexing', basename
channel, _, date = basename.rpartition('.')
channel = channel.decode('utf-8')
for line in infile:
line = line.decode('utf-8')
groups = line_pattern.match(line).groupdict()
if groups['message_actor']:
doc = {'actor': groups['message_actor'], 'message': groups['message']}
elif groups['emote_actor']:
doc = {'actor': groups['emote_actor'], 'message': groups['emote']}
else:
doc = {}
for key in ['actor', 'kicker', 'oldName', 'topic', 'reason']:
if groups[key]:
doc[key] = groups[key]
doc['formatted'] = groups['formatted']
doc['channel'] = channel
doc['receivedAt'] = datetime.datetime.strptime(
'%sT%s' % (date, groups['time']), '%Y-%m-%dT%H:%M:%S')
writer.add_document(**doc)
writer.commit()
main()
|
Add a log importer for logs that need indexing.# Copyright (c) Weasyl LLC
# See COPYING for details.
import argparse
import datetime
import os.path
import re
import elastirc
from whoosh import index
line_pattern = re.compile(
r'(?P<time>[0-9:]{8}) (?P<formatted>'
r'\(-\) (?P<actor>[^ ]+?) '
r'(?P<action>joined|parted|quit'
r'|was kicked by (?P<kicker>[^ ]+?)'
r'|changed nick from (?P<oldName>[^ ]+?)'
r'|changed topic to (?P<topic>.*)'
r'|set mode .+)'
r'(?: \((?P<reason>.*)\))?'
r'|<(?P<message_actor>[^>]+?)> (?P<message>.*)'
r'|\* (?P<emote_actor>[^ ]+?) (?P<emote>.*)'
r')'
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create-index', default=False, action='store_true')
parser.add_argument('index')
parser.add_argument('infiles', nargs='*', type=argparse.FileType('r'))
args = parser.parse_args()
if args.create_index:
if not os.path.exists(args.index):
os.makedirs(args.index)
ix = index.create_in(args.index, elastirc.whooshSchema)
else:
ix = index.open_dir(args.index)
writer = ix.writer()
for infile in args.infiles:
basename = os.path.basename(infile.name)
print 'indexing', basename
channel, _, date = basename.rpartition('.')
channel = channel.decode('utf-8')
for line in infile:
line = line.decode('utf-8')
groups = line_pattern.match(line).groupdict()
if groups['message_actor']:
doc = {'actor': groups['message_actor'], 'message': groups['message']}
elif groups['emote_actor']:
doc = {'actor': groups['emote_actor'], 'message': groups['emote']}
else:
doc = {}
for key in ['actor', 'kicker', 'oldName', 'topic', 'reason']:
if groups[key]:
doc[key] = groups[key]
doc['formatted'] = groups['formatted']
doc['channel'] = channel
doc['receivedAt'] = datetime.datetime.strptime(
'%sT%s' % (date, groups['time']), '%Y-%m-%dT%H:%M:%S')
writer.add_document(**doc)
writer.commit()
main()
|
<commit_before><commit_msg>Add a log importer for logs that need indexing.<commit_after># Copyright (c) Weasyl LLC
# See COPYING for details.
import argparse
import datetime
import os.path
import re
import elastirc
from whoosh import index
line_pattern = re.compile(
r'(?P<time>[0-9:]{8}) (?P<formatted>'
r'\(-\) (?P<actor>[^ ]+?) '
r'(?P<action>joined|parted|quit'
r'|was kicked by (?P<kicker>[^ ]+?)'
r'|changed nick from (?P<oldName>[^ ]+?)'
r'|changed topic to (?P<topic>.*)'
r'|set mode .+)'
r'(?: \((?P<reason>.*)\))?'
r'|<(?P<message_actor>[^>]+?)> (?P<message>.*)'
r'|\* (?P<emote_actor>[^ ]+?) (?P<emote>.*)'
r')'
)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--create-index', default=False, action='store_true')
parser.add_argument('index')
parser.add_argument('infiles', nargs='*', type=argparse.FileType('r'))
args = parser.parse_args()
if args.create_index:
if not os.path.exists(args.index):
os.makedirs(args.index)
ix = index.create_in(args.index, elastirc.whooshSchema)
else:
ix = index.open_dir(args.index)
writer = ix.writer()
for infile in args.infiles:
basename = os.path.basename(infile.name)
print 'indexing', basename
channel, _, date = basename.rpartition('.')
channel = channel.decode('utf-8')
for line in infile:
line = line.decode('utf-8')
groups = line_pattern.match(line).groupdict()
if groups['message_actor']:
doc = {'actor': groups['message_actor'], 'message': groups['message']}
elif groups['emote_actor']:
doc = {'actor': groups['emote_actor'], 'message': groups['emote']}
else:
doc = {}
for key in ['actor', 'kicker', 'oldName', 'topic', 'reason']:
if groups[key]:
doc[key] = groups[key]
doc['formatted'] = groups['formatted']
doc['channel'] = channel
doc['receivedAt'] = datetime.datetime.strptime(
'%sT%s' % (date, groups['time']), '%Y-%m-%dT%H:%M:%S')
writer.add_document(**doc)
writer.commit()
main()
|
|
96aee303e9fcc3ef92d176418879089ad4f328a6
|
indexing/src/ldsImporter.py
|
indexing/src/ldsImporter.py
|
from collections import namedtuple
# BoundingBox:
# top (int) Top of the box
# bottom (int) Bottom of the box
# left (int) Left of the box
# right (int) Right of the box
BoundingBox = namedtuple('BoundingBox', 'top bottom left right')
# Record:
# line (int) Line number on the image for the record
# sex (string) Gender - either 'M', 'F', or ''
# race (string) Race or color - e.g. 'White'
# married (string) Martial status - either 'M', 'S', 'D', 'W', or ''
Record = namedtuple('Record', 'line sex race married')
# RecordBoundingBoxes
# line (int) Line number on the image for the record
# sexBox (BoundingBox) Bounding box for the sex field on the record's line
# raceBox (BoundingBox) Bounding box for the race field on the record's line
# marriedBox (BoundingBox) Bounding box for the marital status field on the record's line
RecordBoundingBoxes = namedtuple('RecordBoundingBoxes', 'line sexBox raceBox marriedBox')
def getAttributeContents(node, attribute):
'''
Returns the content of the given attribute to the given node.
If the attribute doesn't exist (or there are more than one), then an
AssertionError is thrown.
'''
a = node.xpathEval('./@' + attribute)
assert len(a) > 0, 'Attribute {0} not found in node {1}'.format(attribute, node)
assert len(a) < 2, 'Duplicate attributes {0} found in node {1}'.format(attribute, node)
return a[0].get_content()
class ImageData(object):
'''
Represents the data for a single image
Has the following attributes:
- imagePath File path to image file
- trueRecords List of Record objects from the true data set
- aRecords List of Record objects from indexer A
- bRecords List of Record objects from indexer B
- arbRecords List of Record objects after arbitration of A and B
- companyRecords List of Record objects from "The Company"
- boundingBoxes List of RecordBoundingBoxes objects
'''
def __init__(self, imagePath):
'Creates an empty ImageData'
self.imagePath = imagePath
self.trueRecords = []
self.aRecords = []
self.bRecords = []
self.arbRecords = []
self.companyRecords = []
self.boundingBoxes = []
def parseTrueXml(filepath):
'''
Populates the self.trueRecords list (appending)
'''
pass
def parseAbarbXml(filepath):
'''
Populates the self.aRecords, self.bRecords, and self.arbRecords lists (appending)
'''
pass
def parseCompanyXml(filepath):
'''
Populates the self.companyRecords and self.boundingBoxes lists (appending)
'''
pass
def readFiles(directory):
'''
Reads the files from the given directory and returns a list of ImageData objects
'''
pass
|
Add empty implementation of the importer
|
src: Add empty implementation of the importer
|
Python
|
mit
|
mikebentley15/cs6350_project_ml
|
src: Add empty implementation of the importer
|
from collections import namedtuple
# BoundingBox:
# top (int) Top of the box
# bottom (int) Bottom of the box
# left (int) Left of the box
# right (int) Right of the box
BoundingBox = namedtuple('BoundingBox', 'top bottom left right')
# Record:
# line (int) Line number on the image for the record
# sex (string) Gender - either 'M', 'F', or ''
# race (string) Race or color - e.g. 'White'
# married (string) Marital status - either 'M', 'S', 'D', 'W', or ''
Record = namedtuple('Record', 'line sex race married')
# RecordBoundingBoxes
# line (int) Line number on the image for the record
# sexBox (BoundingBox) Bounding box for the sex field on the record's line
# raceBox (BoundingBox) Bounding box for the race field on the record's line
# marriedBox (BoundingBox) Bounding box for the marital status field on the record's line
RecordBoundingBoxes = namedtuple('RecordBoundingBoxes', 'line sexBox raceBox marriedBox')
def getAttributeContents(node, attribute):
'''
    Returns the content of the given attribute of the given node.
    If the attribute doesn't exist (or there is more than one), then an
AssertionError is thrown.
'''
a = node.xpathEval('./@' + attribute)
assert len(a) > 0, 'Attribute {0} not found in node {1}'.format(attribute, node)
assert len(a) < 2, 'Duplicate attributes {0} found in node {1}'.format(attribute, node)
return a[0].get_content()
class ImageData(object):
'''
Represents the data for a single image
Has the following attributes:
- imagePath File path to image file
- trueRecords List of Record objects from the true data set
- aRecords List of Record objects from indexer A
- bRecords List of Record objects from indexer B
- arbRecords List of Record objects after arbitration of A and B
- companyRecords List of Record objects from "The Company"
- boundingBoxes List of RecordBoundingBoxes objects
'''
def __init__(self, imagePath):
'Creates an empty ImageData'
self.imagePath = imagePath
self.trueRecords = []
self.aRecords = []
self.bRecords = []
self.arbRecords = []
self.companyRecords = []
self.boundingBoxes = []
    def parseTrueXml(self, filepath):
'''
Populates the self.trueRecords list (appending)
'''
pass
    def parseAbarbXml(self, filepath):
'''
Populates the self.aRecords, self.bRecords, and self.arbRecords lists (appending)
'''
pass
    def parseCompanyXml(self, filepath):
'''
Populates the self.companyRecords and self.boundingBoxes lists (appending)
'''
pass
def readFiles(directory):
'''
Reads the files from the given directory and returns a list of ImageData objects
'''
pass
|
<commit_before><commit_msg>src: Add empty implementation of the importer<commit_after>
|
from collections import namedtuple
# BoundingBox:
# top (int) Top of the box
# bottom (int) Bottom of the box
# left (int) Left of the box
# right (int) Right of the box
BoundingBox = namedtuple('BoundingBox', 'top bottom left right')
# Record:
# line (int) Line number on the image for the record
# sex (string) Gender - either 'M', 'F', or ''
# race (string) Race or color - e.g. 'White'
# married (string) Marital status - either 'M', 'S', 'D', 'W', or ''
Record = namedtuple('Record', 'line sex race married')
# RecordBoundingBoxes
# line (int) Line number on the image for the record
# sexBox (BoundingBox) Bounding box for the sex field on the record's line
# raceBox (BoundingBox) Bounding box for the race field on the record's line
# marriedBox (BoundingBox) Bounding box for the marital status field on the record's line
RecordBoundingBoxes = namedtuple('RecordBoundingBoxes', 'line sexBox raceBox marriedBox')
def getAttributeContents(node, attribute):
'''
    Returns the content of the given attribute of the given node.
    If the attribute doesn't exist (or there is more than one), then an
AssertionError is thrown.
'''
a = node.xpathEval('./@' + attribute)
assert len(a) > 0, 'Attribute {0} not found in node {1}'.format(attribute, node)
assert len(a) < 2, 'Duplicate attributes {0} found in node {1}'.format(attribute, node)
return a[0].get_content()
class ImageData(object):
'''
Represents the data for a single image
Has the following attributes:
- imagePath File path to image file
- trueRecords List of Record objects from the true data set
- aRecords List of Record objects from indexer A
- bRecords List of Record objects from indexer B
- arbRecords List of Record objects after arbitration of A and B
- companyRecords List of Record objects from "The Company"
- boundingBoxes List of RecordBoundingBoxes objects
'''
def __init__(self, imagePath):
'Creates an empty ImageData'
self.imagePath = imagePath
self.trueRecords = []
self.aRecords = []
self.bRecords = []
self.arbRecords = []
self.companyRecords = []
self.boundingBoxes = []
    def parseTrueXml(self, filepath):
'''
Populates the self.trueRecords list (appending)
'''
pass
    def parseAbarbXml(self, filepath):
'''
Populates the self.aRecords, self.bRecords, and self.arbRecords lists (appending)
'''
pass
    def parseCompanyXml(self, filepath):
'''
Populates the self.companyRecords and self.boundingBoxes lists (appending)
'''
pass
def readFiles(directory):
'''
Reads the files from the given directory and returns a list of ImageData objects
'''
pass
|
src: Add empty implementation of the importerfrom collections import namedtuple
# BoundingBox:
# top (int) Top of the box
# bottom (int) Bottom of the box
# left (int) Left of the box
# right (int) Right of the box
BoundingBox = namedtuple('BoundingBox', 'top bottom left right')
# Record:
# line (int) Line number on the image for the record
# sex (string) Gender - either 'M', 'F', or ''
# race (string) Race or color - e.g. 'White'
# married (string) Marital status - either 'M', 'S', 'D', 'W', or ''
Record = namedtuple('Record', 'line sex race married')
# RecordBoundingBoxes
# line (int) Line number on the image for the record
# sexBox (BoundingBox) Bounding box for the sex field on the record's line
# raceBox (BoundingBox) Bounding box for the race field on the record's line
# marriedBox (BoundingBox) Bounding box for the marital status field on the record's line
RecordBoundingBoxes = namedtuple('RecordBoundingBoxes', 'line sexBox raceBox marriedBox')
def getAttributeContents(node, attribute):
'''
    Returns the content of the given attribute of the given node.
    If the attribute doesn't exist (or there is more than one), then an
AssertionError is thrown.
'''
a = node.xpathEval('./@' + attribute)
assert len(a) > 0, 'Attribute {0} not found in node {1}'.format(attribute, node)
assert len(a) < 2, 'Duplicate attributes {0} found in node {1}'.format(attribute, node)
return a[0].get_content()
class ImageData(object):
'''
Represents the data for a single image
Has the following attributes:
- imagePath File path to image file
- trueRecords List of Record objects from the true data set
- aRecords List of Record objects from indexer A
- bRecords List of Record objects from indexer B
- arbRecords List of Record objects after arbitration of A and B
- companyRecords List of Record objects from "The Company"
- boundingBoxes List of RecordBoundingBoxes objects
'''
def __init__(self, imagePath):
'Creates an empty ImageData'
self.imagePath = imagePath
self.trueRecords = []
self.aRecords = []
self.bRecords = []
self.arbRecords = []
self.companyRecords = []
self.boundingBoxes = []
    def parseTrueXml(self, filepath):
'''
Populates the self.trueRecords list (appending)
'''
pass
    def parseAbarbXml(self, filepath):
'''
Populates the self.aRecords, self.bRecords, and self.arbRecords lists (appending)
'''
pass
    def parseCompanyXml(self, filepath):
'''
Populates the self.companyRecords and self.boundingBoxes lists (appending)
'''
pass
def readFiles(directory):
'''
Reads the files from the given directory and returns a list of ImageData objects
'''
pass
|
<commit_before><commit_msg>src: Add empty implementation of the importer<commit_after>from collections import namedtuple
# BoundingBox:
# top (int) Top of the box
# bottom (int) Bottom of the box
# left (int) Left of the box
# right (int) Right of the box
BoundingBox = namedtuple('BoundingBox', 'top bottom left right')
# Record:
# line (int) Line number on the image for the record
# sex (string) Gender - either 'M', 'F', or ''
# race (string) Race or color - e.g. 'White'
# married (string) Marital status - either 'M', 'S', 'D', 'W', or ''
Record = namedtuple('Record', 'line sex race married')
# RecordBoundingBoxes
# line (int) Line number on the image for the record
# sexBox (BoundingBox) Bounding box for the sex field on the record's line
# raceBox (BoundingBox) Bounding box for the race field on the record's line
# marriedBox (BoundingBox) Bounding box for the marital status field on the record's line
RecordBoundingBoxes = namedtuple('RecordBoundingBoxes', 'line sexBox raceBox marriedBox')
def getAttributeContents(node, attribute):
'''
    Returns the content of the given attribute of the given node.
    If the attribute doesn't exist (or there is more than one), then an
AssertionError is thrown.
'''
a = node.xpathEval('./@' + attribute)
assert len(a) > 0, 'Attribute {0} not found in node {1}'.format(attribute, node)
assert len(a) < 2, 'Duplicate attributes {0} found in node {1}'.format(attribute, node)
return a[0].get_content()
class ImageData(object):
'''
Represents the data for a single image
Has the following attributes:
- imagePath File path to image file
- trueRecords List of Record objects from the true data set
- aRecords List of Record objects from indexer A
- bRecords List of Record objects from indexer B
- arbRecords List of Record objects after arbitration of A and B
- companyRecords List of Record objects from "The Company"
- boundingBoxes List of RecordBoundingBoxes objects
'''
def __init__(self, imagePath):
'Creates an empty ImageData'
self.imagePath = imagePath
self.trueRecords = []
self.aRecords = []
self.bRecords = []
self.arbRecords = []
self.companyRecords = []
self.boundingBoxes = []
    def parseTrueXml(self, filepath):
'''
Populates the self.trueRecords list (appending)
'''
pass
    def parseAbarbXml(self, filepath):
'''
Populates the self.aRecords, self.bRecords, and self.arbRecords lists (appending)
'''
pass
    def parseCompanyXml(self, filepath):
'''
Populates the self.companyRecords and self.boundingBoxes lists (appending)
'''
pass
def readFiles(directory):
'''
Reads the files from the given directory and returns a list of ImageData objects
'''
pass
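# A hypothetical usage sketch (not part of the original commit) showing how the
# stubs above are meant to be driven once implemented. The file paths are made up;
# only names defined in this module (ImageData and the Record fields) are used.
if __name__ == '__main__':
    image = ImageData('data/census_page_001.png')
    image.parseTrueXml('data/census_page_001_true.xml')
    image.parseCompanyXml('data/census_page_001_company.xml')
    for record in image.trueRecords:   # stays empty until parseTrueXml() is implemented
        print record.line, record.sex, record.race, record.married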
|
|
4e1dd595631949e7c4ccea62e58e60f7736ecefd
|
test/test_benchmarks.py
|
test/test_benchmarks.py
|
"""Tests of time-series prediction benchmarking functions"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
|
Add initial tests for bbs_benchmarks.py
|
Add initial tests for bbs_benchmarks.py
|
Python
|
mit
|
davharris/bbs-forecasting,davharris/bbs-forecasting,davharris/bbs-forecasting
|
Add initial tests for bbs_benchmarks.py
|
"""Tests of time-series prediction benchmarking functions"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
|
<commit_before><commit_msg>Add initial tests for bbs_benchmarks.py<commit_after>
|
"""Tests of time-series prediction benchmarking functions"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
|
Add initial tests for bbs_benchmarks.py"""Tests of time-series prediction benchmarking functions"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
|
<commit_before><commit_msg>Add initial tests for bbs_benchmarks.py<commit_after>"""Tests of time-series prediction benchmarking functions"""
import pandas as pd
from pandas.util.testing import assert_frame_equal
from bbs_benchmarks import *
def test_benchmark_predictions():
time = [1, 2, 3]
value = [4, 5, 6]
preds = benchmark_predictions(time, value, lag=1)
assert preds == [6, 5, 4.5]
def test_filter_timeseries_contiguous():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2], 'date': [1, 2, 3, 4, 1, 2]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigtrue():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1], 'date': [1, 2, 3, 4]}))
def test_filter_timeseries_noncontiguous_contigfalse():
data = pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]})
filtered = filter_timeseries(data, group_cols='site', date_col='date', min_years=3, contiguous=False)
assert_frame_equal(filtered, pd.DataFrame({'site': [1, 1, 1, 1, 2, 2, 2], 'date': [1, 2, 3, 4, 1, 2, 4]}))
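# A hypothetical sketch (not from bbs_benchmarks): one benchmark_predictions()
# implementation consistent with the first test above -- a naive last-value
# forecast, the value `lag` steps before the last, and the mean of all earlier
# values. For time=[1, 2, 3], value=[4, 5, 6], lag=1 it returns [6, 5, 4.5].
# The real module may define its benchmarks differently.
def benchmark_predictions_sketch(time, value, lag=1):
    naive = value[-1]                                           # last observed value
    lagged = value[-1 - lag]                                    # value `lag` steps earlier
    historical_mean = sum(value[:-1]) / float(len(value) - 1)  # mean of earlier values
    return [naive, lagged, historical_mean]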
|
|
6183f2c177092b625ce3b86fdc4097ea92ed7699
|
stdnum/at/zvr_zahl.py
|
stdnum/at/zvr_zahl.py
|
# zvr_zahl.py - functions for handling Austrian association register numbers
# coding: utf-8
#
# Copyright (C) 2017 Holvi Payment Services Oy
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" ZVR-Zahl (Zentrales Vereinsregister Zahl or ZVR-Zahl or
association registry number)
The number is given to associations by the Association register to identify
them. The number is 9 characters long and is given out on a running basis.
No known checksum; this module only checks that the number is clean and all digits.
>>> validate('123456789')
'123456789'
>>> validate('0123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('A12345678')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> is_valid('123456789')
True
>>> is_valid('1234567890')
False
"""
from stdnum.exceptions import (
InvalidLength,
InvalidFormat,
ValidationError
)
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This removes
surrounding whitespace and raise an error on junk letters."""
return clean(number, ' ').strip()
def validate(number):
"""Checks to see if the number provided is a valid association register
number.
This checks only the formatting."""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Return boolean value of the association registery number validity"""
try:
return bool(validate(number))
except ValidationError:
return False
|
Implement validator for Austrian association register number
|
Implement validator for Austrian association register number
|
Python
|
lgpl-2.1
|
holvi/python-stdnum,holvi/python-stdnum,holvi/python-stdnum
|
Implement validator for Austrian association register number
|
# zvr_zahl.py - functions for handling Austrian association register numbers
# coding: utf-8
#
# Copyright (C) 2017 Holvi Payment Services Oy
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" ZVR-Zahl (Zentrales Vereinsregister Zahl or ZVR-Zahl or
association registry number)
The number is given to associations by the Association register to identify
them. The number is 9 characters long and is given out on a running basis.
No known checksum; this module only checks that the number is clean and all digits.
>>> validate('123456789')
'123456789'
>>> validate('0123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('A12345678')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> is_valid('123456789')
True
>>> is_valid('1234567890')
False
"""
from stdnum.exceptions import (
InvalidLength,
InvalidFormat,
ValidationError
)
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This removes
surrounding whitespace and raise an error on junk letters."""
return clean(number, ' ').strip()
def validate(number):
"""Checks to see if the number provided is a valid association register
number.
This checks only the formatting."""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Return boolean value of the association registery number validity"""
try:
return bool(validate(number))
except ValidationError:
return False
|
<commit_before><commit_msg>Implement validator for Austrian association register number<commit_after>
|
# zvr_zahl.py - functions for handling Austrian association register numbers
# coding: utf-8
#
# Copyright (C) 2017 Holvi Payment Services Oy
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" ZVR-Zahl (Zentrales Vereinsregister Zahl or ZVR-Zahl or
association registry number)
The number is given to associations by the Association register to identify
them. The number is 9 characters long and is given out on a running basis.
No known checksum; this module only checks that the number is clean and all digits.
>>> validate('123456789')
'123456789'
>>> validate('0123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('A12345678')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> is_valid('123456789')
True
>>> is_valid('1234567890')
False
"""
from stdnum.exceptions import (
InvalidLength,
InvalidFormat,
ValidationError
)
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This removes
surrounding whitespace and raise an error on junk letters."""
return clean(number, ' ').strip()
def validate(number):
"""Checks to see if the number provided is a valid association register
number.
This checks only the formatting."""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Return boolean value of the association registery number validity"""
try:
return bool(validate(number))
except ValidationError:
return False
|
Implement validator for Austrian association register number# zvr_zahl.py - functions for handling Austrian association register numbers
# coding: utf-8
#
# Copyright (C) 2017 Holvi Payment Services Oy
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" ZVR-Zahl (Zentrales Vereinsregister Zahl or ZVR-Zahl or
association registry number)
The number is given to associations by the Association register to identify
them. The number is 9 characters long and is given out on a running basis.
No known checksum; this module only checks that the number is clean and all digits.
>>> validate('123456789')
'123456789'
>>> validate('0123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('A12345678')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> is_valid('123456789')
True
>>> is_valid('1234567890')
False
"""
from stdnum.exceptions import (
InvalidLength,
InvalidFormat,
ValidationError
)
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This removes
surrounding whitespace and raise an error on junk letters."""
return clean(number, ' ').strip()
def validate(number):
"""Checks to see if the number provided is a valid association register
number.
This checks only the formatting."""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Return boolean value of the association registery number validity"""
try:
return bool(validate(number))
except ValidationError:
return False
|
<commit_before><commit_msg>Implement validator for Austrian association register number<commit_after># zvr_zahl.py - functions for handling Austrian association register numbers
# coding: utf-8
#
# Copyright (C) 2017 Holvi Payment Services Oy
# Copyright (C) 2017 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" ZVR-Zahl (Zentrales Vereinsregister Zahl or ZVR-Zahl or
association registry number)
The number is given to associations by the Association register to identify
them. The number is 9 characters long and is given out on a running basis.
No known checksum; this module only checks that the number is clean and all digits.
>>> validate('123456789')
'123456789'
>>> validate('0123456789')
Traceback (most recent call last):
...
InvalidLength: ...
>>> validate('A12345678')
Traceback (most recent call last):
...
InvalidFormat: ...
>>> is_valid('123456789')
True
>>> is_valid('1234567890')
False
"""
from stdnum.exceptions import (
InvalidLength,
InvalidFormat,
ValidationError
)
from stdnum.util import clean
def compact(number):
"""Convert the number to the minimal representation. This removes
surrounding whitespace and raise an error on junk letters."""
return clean(number, ' ').strip()
def validate(number):
"""Checks to see if the number provided is a valid association register
number.
This checks only the formatting."""
number = compact(number)
if len(number) > 9:
raise InvalidLength()
if not number.isdigit():
raise InvalidFormat()
return number
def is_valid(number):
"""Return boolean value of the association registery number validity"""
try:
return bool(validate(number))
except ValidationError:
return False
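# A small usage sketch (not part of the original commit), grounded in the
# functions above: compact() only strips spaces, while validate() enforces the
# length and digit checks.
if __name__ == '__main__':
    print(compact('123 456 789'))   # -> '123456789'
    print(is_valid('123456789'))    # -> True
    print(is_valid('A12345678'))    # -> False (contains a letter)
    print(is_valid('1234567890'))   # -> False (longer than 9 digits)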
|
|
1615fafde907488c9af7b40ea2f4ee02b4e05507
|
saleor/dashboard/discount/forms.py
|
saleor/dashboard/discount/forms.py
|
from django import forms
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
type = cleaned_data['type']
value = cleaned_data['value']
if type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
# TODO: Implement cost price checks
return cleaned_data
|
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
discount_type = cleaned_data['type']
apply_on = cleaned_data['apply_on']
value = cleaned_data['value']
required_msg = pgettext_lazy('discount error', 'This field is required')
if discount_type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
if (apply_on == Discount.APPLY_ON_PRODUCTS and not
cleaned_data['products']):
self.add_error('products', required_msg)
elif (apply_on == Discount.APPLY_ON_CATEGORIES and not
cleaned_data['categories']):
self.add_error('categories', required_msg)
elif apply_on == Discount.APPLY_ON_BOTH and not (
cleaned_data['products'] or cleaned_data['categories']):
self.add_error('products', required_msg)
self.add_error('categories', required_msg)
# TODO: Implement cost price checks
return cleaned_data
|
Add more detailed validation in dashboard
|
Add more detailed validation in dashboard
|
Python
|
bsd-3-clause
|
UITools/saleor,laosunhust/saleor,UITools/saleor,itbabu/saleor,jreigel/saleor,KenMutemi/saleor,laosunhust/saleor,laosunhust/saleor,car3oon/saleor,spartonia/saleor,UITools/saleor,maferelo/saleor,jreigel/saleor,rchav/vinerack,car3oon/saleor,UITools/saleor,rodrigozn/CW-Shop,maferelo/saleor,rchav/vinerack,laosunhust/saleor,tfroehlich82/saleor,maferelo/saleor,KenMutemi/saleor,spartonia/saleor,HyperManTT/ECommerceSaleor,KenMutemi/saleor,car3oon/saleor,jreigel/saleor,rodrigozn/CW-Shop,mociepka/saleor,mociepka/saleor,tfroehlich82/saleor,spartonia/saleor,HyperManTT/ECommerceSaleor,itbabu/saleor,HyperManTT/ECommerceSaleor,spartonia/saleor,UITools/saleor,tfroehlich82/saleor,rodrigozn/CW-Shop,mociepka/saleor,rchav/vinerack,itbabu/saleor
|
from django import forms
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
type = cleaned_data['type']
value = cleaned_data['value']
if type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
# TODO: Implement cost price checks
return cleaned_data
Add more detailed validation in dashboard
|
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
discount_type = cleaned_data['type']
apply_on = cleaned_data['apply_on']
value = cleaned_data['value']
required_msg = pgettext_lazy('discount error', 'This field is required')
if discount_type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
if (apply_on == Discount.APPLY_ON_PRODUCTS and not
cleaned_data['products']):
self.add_error('products', required_msg)
elif (apply_on == Discount.APPLY_ON_CATEGORIES and not
cleaned_data['categories']):
self.add_error('categories', required_msg)
elif apply_on == Discount.APPLY_ON_BOTH and not (
cleaned_data['products'] or cleaned_data['categories']):
self.add_error('products', required_msg)
self.add_error('categories', required_msg)
# TODO: Implement cost price checks
return cleaned_data
|
<commit_before>from django import forms
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
type = cleaned_data['type']
value = cleaned_data['value']
if type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
# TODO: Implement cost price checks
return cleaned_data
<commit_msg>Add more detailed validation in dashboard<commit_after>
|
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
discount_type = cleaned_data['type']
apply_on = cleaned_data['apply_on']
value = cleaned_data['value']
required_msg = pgettext_lazy('discount error', 'This field is required')
if discount_type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
if (apply_on == Discount.APPLY_ON_PRODUCTS and not
cleaned_data['products']):
self.add_error('products', required_msg)
elif (apply_on == Discount.APPLY_ON_CATEGORIES and not
cleaned_data['categories']):
self.add_error('categories', required_msg)
elif apply_on == Discount.APPLY_ON_BOTH and not (
cleaned_data['products'] or cleaned_data['categories']):
self.add_error('products', required_msg)
self.add_error('categories', required_msg)
# TODO: Implement cost price checks
return cleaned_data
|
from django import forms
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
type = cleaned_data['type']
value = cleaned_data['value']
if type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
# TODO: Implement cost price checks
return cleaned_data
Add more detailed validation in dashboardfrom django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
discount_type = cleaned_data['type']
apply_on = cleaned_data['apply_on']
value = cleaned_data['value']
required_msg = pgettext_lazy('discount error', 'This field is required')
if discount_type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
if (apply_on == Discount.APPLY_ON_PRODUCTS and not
cleaned_data['products']):
self.add_error('products', required_msg)
elif (apply_on == Discount.APPLY_ON_CATEGORIES and not
cleaned_data['categories']):
self.add_error('categories', required_msg)
elif apply_on == Discount.APPLY_ON_BOTH and not (
cleaned_data['products'] or cleaned_data['categories']):
self.add_error('products', required_msg)
self.add_error('categories', required_msg)
# TODO: Implement cost price checks
return cleaned_data
|
<commit_before>from django import forms
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
type = cleaned_data['type']
value = cleaned_data['value']
if type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
# TODO: Implement cost price checks
return cleaned_data
<commit_msg>Add more detailed validation in dashboard<commit_after>from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.translation import pgettext_lazy
from ...product.models import Discount
class DiscountForm(forms.ModelForm):
class Meta:
model = Discount
exclude = []
def clean(self):
cleaned_data = super(DiscountForm, self).clean()
discount_type = cleaned_data['type']
apply_on = cleaned_data['apply_on']
value = cleaned_data['value']
required_msg = pgettext_lazy('discount error', 'This field is required')
if discount_type == Discount.PERCENTAGE and value > 100:
self.add_error('value', pgettext_lazy('discount error',
'Percentage discount '
'cannot be higher than 100%'))
if (apply_on == Discount.APPLY_ON_PRODUCTS and not
cleaned_data['products']):
self.add_error('products', required_msg)
elif (apply_on == Discount.APPLY_ON_CATEGORIES and not
cleaned_data['categories']):
self.add_error('categories', required_msg)
elif apply_on == Discount.APPLY_ON_BOTH and not (
cleaned_data['products'] or cleaned_data['categories']):
self.add_error('products', required_msg)
self.add_error('categories', required_msg)
# TODO: Implement cost price checks
return cleaned_data
|
34625c7e8817c6e979b59c8f8f3e37f0aaad56a2
|
dead_code_elim.py
|
dead_code_elim.py
|
"""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
import spirv
def remove_debug_if_dead(module, inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in spirv.DEBUG_INSTRUCTIONS
if inst.op_name != 'OpString':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def remove_decoration_if_dead(module, inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in spirv.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def optimize(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
# point to is removed.
for inst in reversed(module.global_insts[:]):
if inst.op_name in spirv.DEBUG_INSTRUCTIONS:
remove_debug_if_dead(module, inst)
elif inst.op_name in spirv.DECORATION_INSTRUCTIONS:
remove_decoration_if_dead(module, inst)
# Remove unused instructions.
for inst in module.instructions_reversed():
if not inst.has_side_effect() and not inst.uses():
inst.destroy()
module.finalize()
|
Add a dead code elimination optimization pass.
|
Add a dead code elimination optimization pass.
|
Python
|
mit
|
kristerw/spirv-tools
|
Add a dead code elimination optimization pass.
|
"""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
import spirv
def remove_debug_if_dead(module, inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in spirv.DEBUG_INSTRUCTIONS
if inst.op_name != 'OpString':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def remove_decoration_if_dead(module, inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in spirv.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def optimize(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
# point to is removed.
for inst in reversed(module.global_insts[:]):
if inst.op_name in spirv.DEBUG_INSTRUCTIONS:
remove_debug_if_dead(module, inst)
elif inst.op_name in spirv.DECORATION_INSTRUCTIONS:
remove_decoration_if_dead(module, inst)
# Remove unused instructions.
for inst in module.instructions_reversed():
if not inst.has_side_effect() and not inst.uses():
inst.destroy()
module.finalize()
|
<commit_before><commit_msg>Add a dead code elimination optimization pass.<commit_after>
|
"""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
import spirv
def remove_debug_if_dead(module, inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in spirv.DEBUG_INSTRUCTIONS
if inst.op_name != 'OpString':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def remove_decoration_if_dead(module, inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in spirv.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def optimize(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
# point to is removed.
for inst in reversed(module.global_insts[:]):
if inst.op_name in spirv.DEBUG_INSTRUCTIONS:
remove_debug_if_dead(module, inst)
elif inst.op_name in spirv.DECORATION_INSTRUCTIONS:
remove_decoration_if_dead(module, inst)
# Remove unused instructions.
for inst in module.instructions_reversed():
if not inst.has_side_effect() and not inst.uses():
inst.destroy()
module.finalize()
|
Add a dead code elimination optimization pass."""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
import spirv
def remove_debug_if_dead(module, inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in spirv.DEBUG_INSTRUCTIONS
if inst.op_name != 'OpString':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def remove_decoration_if_dead(module, inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in spirv.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def optimize(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
# point to is removed.
for inst in reversed(module.global_insts[:]):
if inst.op_name in spirv.DEBUG_INSTRUCTIONS:
remove_debug_if_dead(module, inst)
elif inst.op_name in spirv.DECORATION_INSTRUCTIONS:
remove_decoration_if_dead(module, inst)
# Remove unused instructions.
for inst in module.instructions_reversed():
if not inst.has_side_effect() and not inst.uses():
inst.destroy()
module.finalize()
|
<commit_before><commit_msg>Add a dead code elimination optimization pass.<commit_after>"""Removes unused instructions.
The definition of "unused instruction" is an instruction having a return
ID that is not used by any non-debug and non-decoration instruction, and
does not have side effects."""
import spirv
def remove_debug_if_dead(module, inst):
"""Remove debug instruction if it is not used."""
assert inst.op_name in spirv.DEBUG_INSTRUCTIONS
if inst.op_name != 'OpString':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def remove_decoration_if_dead(module, inst):
"""Remove decoration instruction if it is not used."""
assert inst.op_name in spirv.DECORATION_INSTRUCTIONS
if inst.op_name != 'OpDecorationGroup':
if inst.operands[0] not in module.id_to_inst:
inst.destroy()
def optimize(module):
"""Remove all unused instructions."""
# Garbage collect old unused debug and decoration instructions.
# This is done before the real pass because:
# * They need some special handling, as they do not have inst.result_id
# * They come in the wrong order with regard to constants, so we would
# need extra code in the real pass to ensure constants used in OpLine
# are removed.
# Note: the debug and decoration instructions that are live at the start
    # of this pass are handled by the real pass when the instruction they
# point to is removed.
for inst in reversed(module.global_insts[:]):
if inst.op_name in spirv.DEBUG_INSTRUCTIONS:
remove_debug_if_dead(module, inst)
elif inst.op_name in spirv.DECORATION_INSTRUCTIONS:
remove_decoration_if_dead(module, inst)
# Remove unused instructions.
for inst in module.instructions_reversed():
if not inst.has_side_effect() and not inst.uses():
inst.destroy()
module.finalize()
|
|
eb40246064d5185edf1d620dcf7270ffe9d7c074
|
tools/test-generator.py
|
tools/test-generator.py
|
#!/usr/bin/python
import sys
import math
import urllib
import urllib2
import time
id = '123456789012345'
server = 'http://localhost:5055'
period = 1
step = 0.001
waypoints = [
(40.722412, -74.006288),
(40.728592, -74.005258),
(40.728348, -74.002822),
(40.725437, -73.996750),
(40.721778, -73.999818),
(40.723323, -74.002994)
]
points = []
for i in range(0, len(waypoints)):
(lat1, lon1) = waypoints[i]
(lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
count = int(math.ceil(length / step))
for j in range(0, count):
lat = lat1 + (lat2 - lat1) * j / count
lon = lon1 + (lon2 - lon1) * j / count
points.append((lat, lon))
def send(lat, lon, course):
params = (('id', id), ('timestamp', int(time.time())), ('lat', lat), ('lon', lon), ('bearing', course))
urllib2.urlopen(server + '?' + urllib.urlencode(params)).read()
def course(lat1, lon1, lat2, lon2):
lat1 = lat1 * math.pi / 180
lon1 = lon1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon2 = lon2 * math.pi / 180
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
return (math.atan2(y, x) % (2 * math.pi)) * 180 / math.pi
index = 0
while True:
(lat1, lon1) = points[index % len(points)]
(lat2, lon2) = points[(index + 1) % len(points)]
send(lat1, lon1, course(lat1, lon1, lat2, lon2))
time.sleep(period)
index += 1
|
Create test data generator script
|
Create test data generator script
|
Python
|
apache-2.0
|
orcoliver/traccar,stalien/traccar_test,5of9/traccar,tsmgeek/traccar,ninioe/traccar,tsmgeek/traccar,jssenyange/traccar,vipien/traccar,stalien/traccar_test,renaudallard/traccar,AnshulJain1985/Roadcast-Tracker,orcoliver/traccar,duke2906/traccar,ninioe/traccar,tananaev/traccar,tsmgeek/traccar,renaudallard/traccar,joseant/traccar-1,duke2906/traccar,al3x1s/traccar,tananaev/traccar,5of9/traccar,vipien/traccar,jon-stumpf/traccar,al3x1s/traccar,ninioe/traccar,jssenyange/traccar,AnshulJain1985/Roadcast-Tracker,tananaev/traccar,jssenyange/traccar,jon-stumpf/traccar,orcoliver/traccar,joseant/traccar-1,jon-stumpf/traccar
|
Create test data generator script
|
#!/usr/bin/python
import sys
import math
import urllib
import urllib2
import time
id = '123456789012345'
server = 'http://localhost:5055'
period = 1
step = 0.001
waypoints = [
(40.722412, -74.006288),
(40.728592, -74.005258),
(40.728348, -74.002822),
(40.725437, -73.996750),
(40.721778, -73.999818),
(40.723323, -74.002994)
]
points = []
for i in range(0, len(waypoints)):
(lat1, lon1) = waypoints[i]
(lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
count = int(math.ceil(length / step))
for j in range(0, count):
lat = lat1 + (lat2 - lat1) * j / count
lon = lon1 + (lon2 - lon1) * j / count
points.append((lat, lon))
def send(lat, lon, course):
params = (('id', id), ('timestamp', int(time.time())), ('lat', lat), ('lon', lon), ('bearing', course))
urllib2.urlopen(server + '?' + urllib.urlencode(params)).read()
def course(lat1, lon1, lat2, lon2):
lat1 = lat1 * math.pi / 180
lon1 = lon1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon2 = lon2 * math.pi / 180
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
return (math.atan2(y, x) % (2 * math.pi)) * 180 / math.pi
index = 0
while True:
(lat1, lon1) = points[index % len(points)]
(lat2, lon2) = points[(index + 1) % len(points)]
send(lat1, lon1, course(lat1, lon1, lat2, lon2))
time.sleep(period)
index += 1
|
<commit_before><commit_msg>Create test data generator script<commit_after>
|
#!/usr/bin/python
import sys
import math
import urllib
import urllib2
import time
id = '123456789012345'
server = 'http://localhost:5055'
period = 1
step = 0.001
waypoints = [
(40.722412, -74.006288),
(40.728592, -74.005258),
(40.728348, -74.002822),
(40.725437, -73.996750),
(40.721778, -73.999818),
(40.723323, -74.002994)
]
points = []
for i in range(0, len(waypoints)):
(lat1, lon1) = waypoints[i]
(lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
count = int(math.ceil(length / step))
for j in range(0, count):
lat = lat1 + (lat2 - lat1) * j / count
lon = lon1 + (lon2 - lon1) * j / count
points.append((lat, lon))
def send(lat, lon, course):
params = (('id', id), ('timestamp', int(time.time())), ('lat', lat), ('lon', lon), ('bearing', course))
urllib2.urlopen(server + '?' + urllib.urlencode(params)).read()
def course(lat1, lon1, lat2, lon2):
lat1 = lat1 * math.pi / 180
lon1 = lon1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon2 = lon2 * math.pi / 180
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
return (math.atan2(y, x) % (2 * math.pi)) * 180 / math.pi
index = 0
while True:
(lat1, lon1) = points[index % len(points)]
(lat2, lon2) = points[(index + 1) % len(points)]
send(lat1, lon1, course(lat1, lon1, lat2, lon2))
time.sleep(period)
index += 1
|
Create test data generator script#!/usr/bin/python
import sys
import math
import urllib
import urllib2
import time
id = '123456789012345'
server = 'http://localhost:5055'
period = 1
step = 0.001
waypoints = [
(40.722412, -74.006288),
(40.728592, -74.005258),
(40.728348, -74.002822),
(40.725437, -73.996750),
(40.721778, -73.999818),
(40.723323, -74.002994)
]
points = []
for i in range(0, len(waypoints)):
(lat1, lon1) = waypoints[i]
(lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
count = int(math.ceil(length / step))
for j in range(0, count):
lat = lat1 + (lat2 - lat1) * j / count
lon = lon1 + (lon2 - lon1) * j / count
points.append((lat, lon))
def send(lat, lon, course):
params = (('id', id), ('timestamp', int(time.time())), ('lat', lat), ('lon', lon), ('bearing', course))
urllib2.urlopen(server + '?' + urllib.urlencode(params)).read()
def course(lat1, lon1, lat2, lon2):
lat1 = lat1 * math.pi / 180
lon1 = lon1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon2 = lon2 * math.pi / 180
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
return (math.atan2(y, x) % (2 * math.pi)) * 180 / math.pi
index = 0
while True:
(lat1, lon1) = points[index % len(points)]
(lat2, lon2) = points[(index + 1) % len(points)]
send(lat1, lon1, course(lat1, lon1, lat2, lon2))
time.sleep(period)
index += 1
|
<commit_before><commit_msg>Create test data generator script<commit_after>#!/usr/bin/python
import sys
import math
import urllib
import urllib2
import time
id = '123456789012345'
server = 'http://localhost:5055'
period = 1
step = 0.001
waypoints = [
(40.722412, -74.006288),
(40.728592, -74.005258),
(40.728348, -74.002822),
(40.725437, -73.996750),
(40.721778, -73.999818),
(40.723323, -74.002994)
]
points = []
for i in range(0, len(waypoints)):
(lat1, lon1) = waypoints[i]
(lat2, lon2) = waypoints[(i + 1) % len(waypoints)]
length = math.sqrt((lat2 - lat1) ** 2 + (lon2 - lon1) ** 2)
count = int(math.ceil(length / step))
for j in range(0, count):
lat = lat1 + (lat2 - lat1) * j / count
lon = lon1 + (lon2 - lon1) * j / count
points.append((lat, lon))
def send(lat, lon, course):
params = (('id', id), ('timestamp', int(time.time())), ('lat', lat), ('lon', lon), ('bearing', course))
urllib2.urlopen(server + '?' + urllib.urlencode(params)).read()
def course(lat1, lon1, lat2, lon2):
lat1 = lat1 * math.pi / 180
lon1 = lon1 * math.pi / 180
lat2 = lat2 * math.pi / 180
lon2 = lon2 * math.pi / 180
y = math.sin(lon2 - lon1) * math.cos(lat2)
x = math.cos(lat1) * math.sin(lat2) - math.sin(lat1) * math.cos(lat2) * math.cos(lon2 - lon1)
return (math.atan2(y, x) % (2 * math.pi)) * 180 / math.pi
index = 0
while True:
(lat1, lon1) = points[index % len(points)]
(lat2, lon2) = points[(index + 1) % len(points)]
send(lat1, lon1, course(lat1, lon1, lat2, lon2))
time.sleep(period)
index += 1
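# A hypothetical helper (not part of the original script): preview the first few
# interpolated points and bearings without issuing HTTP requests. Note that the
# loop above runs forever, so this is only reachable if that loop is removed or
# guarded behind a command-line flag.
def preview(count=5):
    for k in range(count):
        (a_lat, a_lon) = points[k % len(points)]
        (b_lat, b_lon) = points[(k + 1) % len(points)]
        print a_lat, a_lon, course(a_lat, a_lon, b_lat, b_lon)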
|
|
7b6975e8bfa35ca211f407db1b9399bc8bb766da
|
test_accelerometer.py
|
test_accelerometer.py
|
from microbit import accelerometer as acc, sleep
tx = 10
ty = 10
tz = 40
x = y = z = 0
while True:
nx, ny, nz = acc.get_values()
if abs(nx - x) >= tx or abs(ny - y) >= ty or abs(nz - z) >= tz:
x, y, z = nx, ny, nz
print(x, y, z)
sleep(50)
|
Add small script to test micro:bit accelerometer
|
Add small script to test micro:bit accelerometer
|
Python
|
mit
|
SpotlightKid/microbit-worldtour-monifa
|
Add small script to test micro:bit accelerometer
|
from microbit import accelerometer as acc, sleep
tx = 10
ty = 10
tz = 40
x = y = z = 0
while True:
nx, ny, nz = acc.get_values()
if abs(nx - x) >= tx or abs(ny - y) >= ty or abs(nz - z) >= tz:
x, y, z = nx, ny, nz
print(x, y, z)
sleep(50)
|
<commit_before><commit_msg>Add small script to test micro:bit accelerometer<commit_after>
|
from microbit import accelerometer as acc, sleep
tx = 10
ty = 10
tz = 40
x = y = z = 0
while True:
nx, ny, nz = acc.get_values()
if abs(nx - x) >= tx or abs(ny - y) >= ty or abs(nz - z) >= tz:
x, y, z = nx, ny, nz
print(x, y, z)
sleep(50)
|
Add small script to test micro:bit accelerometerfrom microbit import accelerometer as acc, sleep
tx = 10
ty = 10
tz = 40
x = y = z = 0
while True:
nx, ny, nz = acc.get_values()
if abs(nx - x) >= tx or abs(ny - y) >= ty or abs(nz - z) >= tz:
x, y, z = nx, ny, nz
print(x, y, z)
sleep(50)
|
<commit_before><commit_msg>Add small script to test micro:bit accelerometer<commit_after>from microbit import accelerometer as acc, sleep
tx = 10
ty = 10
tz = 40
x = y = z = 0
while True:
nx, ny, nz = acc.get_values()
if abs(nx - x) >= tx or abs(ny - y) >= ty or abs(nz - z) >= tz:
x, y, z = nx, ny, nz
print(x, y, z)
sleep(50)
|
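The per-axis thresholds above catch small tilts on individual axes; a magnitude check is sometimes easier to reason about when looking for movement as a whole. A small sketch in the same MicroPython style (the 1200 milli-g threshold is an assumed value, not taken from the original script):

from microbit import accelerometer, sleep
import math

while True:
    x, y, z = accelerometer.get_values()
    magnitude = math.sqrt(x * x + y * y + z * z)  # roughly 1024 milli-g when the board is at rest
    if magnitude > 1200:  # assumed threshold for "the board is being moved"
        print("moving:", x, y, z)
    sleep(50)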
|
4f246ae37b060e677c3c3cd7f6dcdc2f21337cf6
|
dakota/dakota_utils.py
|
dakota/dakota_utils.py
|
#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import re
def get_response_descriptors(params_file):
"""Extract response descriptors from a Dakota parameters file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of response descriptors for the Dakota experiment.
"""
labels = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
labels.append(''.join(re.findall(':(\S+)', line)))
except IOError:
return None
else:
return(labels)
def get_analysis_components(params_file):
"""Extract the analysis components from a Dakota parameters file.
The analysis components are returned as a list. First is the name
of the model being run by Dakota, followed by dicts containing an
output file to analyze and the statistic to apply to the file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of analysis components for the Dakota experiment.
Examples
--------
Extract the analysis components from a Dakota parameters file:
>>> ac = get_analysis_components(params_file)
>>> ac.pop(0)
'hydrotrend'
>>> ac.pop(0)
{'file': 'HYDROASCII.QS', 'statistic': 'median'}
Notes
-----
The syntax expected by this function is defined in the Dakota
input file; e.g., for the example cited above, the 'interface'
section of the input file contains the line:
analysis_components = 'hydrotrend' 'HYDROASCII.QS:median'
"""
ac = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('AC_1', line):
ac.append(line.split('AC_1')[0].strip())
elif re.search('AC_', line):
parts = re.split(':', re.split('AC_', line)[0])
ac.append({'file':parts[0].strip(),
'statistic':parts[1].strip()})
except IOError:
return None
else:
return(ac)
def write_results(results_file, array, labels):
"""Write a Dakota results file from an input numpy array."""
try:
with open(results_file, 'w') as fp:
for i in range(len(array)):
                fp.write('{0!s}\t{1}\n'.format(array[i], labels[i]))
except IOError:
raise
|
Break off helper functions from dakota.py
|
Break off helper functions from dakota.py
|
Python
|
mit
|
csdms/dakota,csdms/dakota
|
Break off helper functions from dakota.py
|
#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import re
def get_response_descriptors(params_file):
"""Extract response descriptors from a Dakota parameters file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of response descriptors for the Dakota experiment.
"""
labels = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
labels.append(''.join(re.findall(':(\S+)', line)))
except IOError:
return None
else:
return(labels)
def get_analysis_components(params_file):
"""Extract the analysis components from a Dakota parameters file.
The analysis components are returned as a list. First is the name
of the model being run by Dakota, followed by dicts containing an
output file to analyze and the statistic to apply to the file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of analysis components for the Dakota experiment.
Examples
--------
Extract the analysis components from a Dakota parameters file:
>>> ac = get_analysis_components(params_file)
>>> ac.pop(0)
'hydrotrend'
>>> ac.pop(0)
{'file': 'HYDROASCII.QS', 'statistic': 'median'}
Notes
-----
The syntax expected by this function is defined in the Dakota
input file; e.g., for the example cited above, the 'interface'
section of the input file contains the line:
analysis_components = 'hydrotrend' 'HYDROASCII.QS:median'
"""
ac = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('AC_1', line):
ac.append(line.split('AC_1')[0].strip())
elif re.search('AC_', line):
parts = re.split(':', re.split('AC_', line)[0])
ac.append({'file':parts[0].strip(),
'statistic':parts[1].strip()})
except IOError:
return None
else:
return(ac)
def write_results(results_file, array, labels):
"""Write a Dakota results file from an input numpy array."""
try:
with open(results_file, 'w') as fp:
for i in range(len(array)):
                fp.write('{0!s}\t{1}\n'.format(array[i], labels[i]))
except IOError:
raise
|
<commit_before><commit_msg>Break off helper functions from dakota.py<commit_after>
|
#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import re
def get_response_descriptors(params_file):
"""Extract response descriptors from a Dakota parameters file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of response descriptors for the Dakota experiment.
"""
labels = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
labels.append(''.join(re.findall(':(\S+)', line)))
except IOError:
return None
else:
return(labels)
def get_analysis_components(params_file):
"""Extract the analysis components from a Dakota parameters file.
The analysis components are returned as a list. First is the name
of the model being run by Dakota, followed by dicts containing an
output file to analyze and the statistic to apply to the file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of analysis components for the Dakota experiment.
Examples
--------
Extract the analysis components from a Dakota parameters file:
>>> ac = get_analysis_components(params_file)
>>> ac.pop(0)
'hydrotrend'
>>> ac.pop(0)
{'file': 'HYDROASCII.QS', 'statistic': 'median'}
Notes
-----
The syntax expected by this function is defined in the Dakota
input file; e.g., for the example cited above, the 'interface'
section of the input file contains the line:
analysis_components = 'hydrotrend' 'HYDROASCII.QS:median'
"""
ac = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('AC_1', line):
ac.append(line.split('AC_1')[0].strip())
elif re.search('AC_', line):
parts = re.split(':', re.split('AC_', line)[0])
ac.append({'file':parts[0].strip(),
'statistic':parts[1].strip()})
except IOError:
return None
else:
return(ac)
def write_results(results_file, array, labels):
"""Write a Dakota results file from an input numpy array."""
try:
with open(results_file, 'w') as fp:
for i in range(len(array)):
                fp.write('{0!s}\t{1}\n'.format(array[i], labels[i]))
except IOError:
raise
|
Break off helper functions from dakota.py#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import re
def get_response_descriptors(params_file):
"""Extract response descriptors from a Dakota parameters file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of response descriptors for the Dakota experiment.
"""
labels = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
labels.append(''.join(re.findall(':(\S+)', line)))
except IOError:
return None
else:
return(labels)
def get_analysis_components(params_file):
"""Extract the analysis components from a Dakota parameters file.
The analysis components are returned as a list. First is the name
of the model being run by Dakota, followed by dicts containing an
output file to analyze and the statistic to apply to the file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of analysis components for the Dakota experiment.
Examples
--------
Extract the analysis components from a Dakota parameters file:
>>> ac = get_analysis_components(params_file)
>>> ac.pop(0)
'hydrotrend'
>>> ac.pop(0)
{'file': 'HYDROASCII.QS', 'statistic': 'median'}
Notes
-----
The syntax expected by this function is defined in the Dakota
input file; e.g., for the example cited above, the 'interface'
section of the input file contains the line:
analysis_components = 'hydrotrend' 'HYDROASCII.QS:median'
"""
ac = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('AC_1', line):
ac.append(line.split('AC_1')[0].strip())
elif re.search('AC_', line):
parts = re.split(':', re.split('AC_', line)[0])
ac.append({'file':parts[0].strip(),
'statistic':parts[1].strip()})
except IOError:
return None
else:
return(ac)
def write_results(results_file, array, labels):
"""Write a Dakota results file from an input numpy array."""
try:
with open(results_file, 'w') as fp:
for i in range(len(array)):
                fp.write('{0!s}\t{1}\n'.format(array[i], labels[i]))
except IOError:
raise
|
<commit_before><commit_msg>Break off helper functions from dakota.py<commit_after>#! /usr/bin/env python
"""Helper functions for processing Dakota parameter and results files."""
import re
def get_response_descriptors(params_file):
"""Extract response descriptors from a Dakota parameters file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of response descriptors for the Dakota experiment.
"""
labels = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('ASV_', line):
labels.append(''.join(re.findall(':(\S+)', line)))
except IOError:
return None
else:
return(labels)
def get_analysis_components(params_file):
"""Extract the analysis components from a Dakota parameters file.
The analysis components are returned as a list. First is the name
of the model being run by Dakota, followed by dicts containing an
output file to analyze and the statistic to apply to the file.
Parameters
----------
params_file : str
The path to a Dakota parameters file.
Returns
-------
list
A list of analysis components for the Dakota experiment.
Examples
--------
Extract the analysis components from a Dakota parameters file:
>>> ac = get_analysis_components(params_file)
>>> ac.pop(0)
'hydrotrend'
>>> ac.pop(0)
{'file': 'HYDROASCII.QS', 'statistic': 'median'}
Notes
-----
The syntax expected by this function is defined in the Dakota
input file; e.g., for the example cited above, the 'interface'
section of the input file contains the line:
analysis_components = 'hydrotrend' 'HYDROASCII.QS:median'
"""
ac = []
try:
with open(params_file, 'r') as fp:
for line in fp:
if re.search('AC_1', line):
ac.append(line.split('AC_1')[0].strip())
elif re.search('AC_', line):
parts = re.split(':', re.split('AC_', line)[0])
ac.append({'file':parts[0].strip(),
'statistic':parts[1].strip()})
except IOError:
return None
else:
return(ac)
def write_results(results_file, array, labels):
"""Write a Dakota results file from an input numpy array."""
try:
with open(results_file, 'w') as fp:
for i in range(len(array)):
                fp.write('{0!s}\t{1}\n'.format(array[i], labels[i]))
except IOError:
raise
|
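To make the expected input concrete: the two readers above rely on Dakota's value-descriptor line layout, with response labels on ASV_ lines and analysis components on AC_ lines. A self-contained check against a made-up parameters fragment (the layout is inferred from the docstring example, not from a real Dakota run, and it assumes the dakota package from this commit is importable):

from dakota.dakota_utils import get_analysis_components, get_response_descriptors

params = "1 ASV_1:Qs_median\nhydrotrend AC_1\nHYDROASCII.QS:median AC_2\n"
with open("params.in", "w") as fp:
    fp.write(params)

print(get_response_descriptors("params.in"))  # expected: ['Qs_median']
print(get_analysis_components("params.in"))   # expected: ['hydrotrend', {'file': 'HYDROASCII.QS', 'statistic': 'median'}]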
|
548d3a3d2c1d853298628772643340bb6d96ee7a
|
tools/create_files.py
|
tools/create_files.py
|
import sys
import os
from random import choice
from random import seed
import string
def random_word():
return "".join([choice(string.lowercase)
for _ in range(choice(range(4, 10)))])
def random_line(n_words=10):
return " ".join([random_word() for _ in range(n_words)])
def random_text(n_lines=30, n_words=10):
return "\n".join([random_line(n_words) for _ in range(n_lines)])
def make_files(n_files=100, base_folder='.'):
for i in range(n_files):
path = os.path.join(
base_folder,
"File %04d.txt" % i)
if not os.path.exists(path):
print("Creating file: " + path)
open(path, 'wb').write(random_text())
if __name__ == "__main__":
seed(42)
base = sys.argv[1] if len(sys.argv) > 1 else '.'
n_files = sys.argv[2] if len(sys.argv) > 2 else 100
make_files(n_files=int(n_files), base_folder=base)
|
Add utility script to create a lot of files with random content inside a folder
|
NXP-16101: Add utility script to create a lot of files with random content inside a folder
|
Python
|
lgpl-2.1
|
arameshkumar/base-nuxeo-drive,IsaacYangSLA/nuxeo-drive,DirkHoffmann/nuxeo-drive,DirkHoffmann/nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/nuxeo-drive,DirkHoffmann/nuxeo-drive,ssdi-drive/nuxeo-drive,DirkHoffmann/nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/base-nuxeo-drive,arameshkumar/base-nuxeo-drive,DirkHoffmann/nuxeo-drive,loopingz/nuxeo-drive,IsaacYangSLA/nuxeo-drive,loopingz/nuxeo-drive,ssdi-drive/nuxeo-drive,loopingz/nuxeo-drive,IsaacYangSLA/nuxeo-drive,loopingz/nuxeo-drive,IsaacYangSLA/nuxeo-drive,arameshkumar/base-nuxeo-drive,rsoumyassdi/nuxeo-drive,arameshkumar/nuxeo-drive,IsaacYangSLA/nuxeo-drive,arameshkumar/nuxeo-drive,arameshkumar/nuxeo-drive,ssdi-drive/nuxeo-drive,rsoumyassdi/nuxeo-drive,loopingz/nuxeo-drive
|
NXP-16101: Add utility script to create a lot of files with random content inside a folder
|
import sys
import os
from random import choice
from random import seed
import string
def random_word():
return "".join([choice(string.lowercase)
for _ in range(choice(range(4, 10)))])
def random_line(n_words=10):
return " ".join([random_word() for _ in range(n_words)])
def random_text(n_lines=30, n_words=10):
return "\n".join([random_line(n_words) for _ in range(n_lines)])
def make_files(n_files=100, base_folder='.'):
for i in range(n_files):
path = os.path.join(
base_folder,
"File %04d.txt" % i)
if not os.path.exists(path):
print("Creating file: " + path)
open(path, 'wb').write(random_text())
if __name__ == "__main__":
seed(42)
base = sys.argv[1] if len(sys.argv) > 1 else '.'
n_files = sys.argv[2] if len(sys.argv) > 2 else 100
make_files(n_files=int(n_files), base_folder=base)
|
<commit_before><commit_msg>NXP-16101: Add utility script to create a lot of files with random content inside a folder<commit_after>
|
import sys
import os
from random import choice
from random import seed
import string
def random_word():
return "".join([choice(string.lowercase)
for _ in range(choice(range(4, 10)))])
def random_line(n_words=10):
return " ".join([random_word() for _ in range(n_words)])
def random_text(n_lines=30, n_words=10):
return "\n".join([random_line(n_words) for _ in range(n_lines)])
def make_files(n_files=100, base_folder='.'):
for i in range(n_files):
path = os.path.join(
base_folder,
"File %04d.txt" % i)
if not os.path.exists(path):
print("Creating file: " + path)
open(path, 'wb').write(random_text())
if __name__ == "__main__":
seed(42)
base = sys.argv[1] if len(sys.argv) > 1 else '.'
n_files = sys.argv[2] if len(sys.argv) > 2 else 100
make_files(n_files=int(n_files), base_folder=base)
|
NXP-16101: Add utility script to create a lot of files with random content inside a folderimport sys
import os
from random import choice
from random import seed
import string
def random_word():
return "".join([choice(string.lowercase)
for _ in range(choice(range(4, 10)))])
def random_line(n_words=10):
return " ".join([random_word() for _ in range(n_words)])
def random_text(n_lines=30, n_words=10):
return "\n".join([random_line(n_words) for _ in range(n_lines)])
def make_files(n_files=100, base_folder='.'):
for i in range(n_files):
path = os.path.join(
base_folder,
"File %04d.txt" % i)
if not os.path.exists(path):
print("Creating file: " + path)
open(path, 'wb').write(random_text())
if __name__ == "__main__":
seed(42)
base = sys.argv[1] if len(sys.argv) > 1 else '.'
n_files = sys.argv[2] if len(sys.argv) > 2 else 100
make_files(n_files=int(n_files), base_folder=base)
|
<commit_before><commit_msg>NXP-16101: Add utility script to create a lot of files with random content inside a folder<commit_after>import sys
import os
from random import choice
from random import seed
import string
def random_word():
return "".join([choice(string.lowercase)
for _ in range(choice(range(4, 10)))])
def random_line(n_words=10):
return " ".join([random_word() for _ in range(n_words)])
def random_text(n_lines=30, n_words=10):
return "\n".join([random_line(n_words) for _ in range(n_lines)])
def make_files(n_files=100, base_folder='.'):
for i in range(n_files):
path = os.path.join(
base_folder,
"File %04d.txt" % i)
if not os.path.exists(path):
print("Creating file: " + path)
open(path, 'wb').write(random_text())
if __name__ == "__main__":
seed(42)
base = sys.argv[1] if len(sys.argv) > 1 else '.'
n_files = sys.argv[2] if len(sys.argv) > 2 else 100
make_files(n_files=int(n_files), base_folder=base)
|
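Usage is a one-liner from the repository root, e.g. python tools/create_files.py /tmp/nxdrive-test 500 (both arguments are placeholders, and the target folder must already exist). The same thing driven from Python, noting that the script targets Python 2 because of string.lowercase:

import create_files  # assumes tools/ is the working directory or on sys.path

create_files.make_files(n_files=500, base_folder='/tmp/nxdrive-test')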
|
2e53b59e2466e121f27236c12b21f731ac18745c
|
scripts/crypto/cryptography_demo.py
|
scripts/crypto/cryptography_demo.py
|
from cryptography.fernet import Fernet
import sys
msg = sys.argv[1].encode("utf-8")
key = Fernet.generate_key()
print("Key: " + key.decode("ascii"))
f = Fernet(key)
token = f.encrypt(msg)
print("Encrypted: " + token.decode("utf-8"))
msg = f.decrypt(token)
print("Decrypted: " + msg.decode("utf-8"))
|
Add demo Fernet encryption demo
|
Add demo Fernet encryption demo
|
Python
|
mit
|
iluxonchik/python-general-repo
|
Add demo Fernet encryption demo
|
from cryptography.fernet import Fernet
import sys
msg = sys.argv[1].encode("utf-8")
key = Fernet.generate_key()
print("Key: " + key.decode("ascii"))
f = Fernet(key)
token = f.encrypt(msg)
print("Encrypted: " + token.decode("utf-8"))
msg = f.decrypt(token)
print("Decrypted: " + msg.decode("utf-8"))
|
<commit_before><commit_msg>Add demo Fernet encryption demo<commit_after>
|
from cryptography.fernet import Fernet
import sys
msg = sys.argv[1].encode("utf-8")
key = Fernet.generate_key()
print("Key: " + key.decode("ascii"))
f = Fernet(key)
token = f.encrypt(msg)
print("Encrypted: " + token.decode("utf-8"))
msg = f.decrypt(token)
print("Decrypted: " + msg.decode("utf-8"))
|
Add demo Fernet encryption demofrom cryptography.fernet import Fernet
import sys
msg = sys.argv[1].encode("utf-8")
key = Fernet.generate_key()
print("Key: " + key.decode("ascii"))
f = Fernet(key)
token = f.encrypt(msg)
print("Encrypted: " + token.decode("utf-8"))
msg = f.decrypt(token)
print("Decrypted: " + msg.decode("utf-8"))
|
<commit_before><commit_msg>Add demo Fernet encryption demo<commit_after>from cryptography.fernet import Fernet
import sys
msg = sys.argv[1].encode("utf-8")
key = Fernet.generate_key()
print("Key: " + key.decode("ascii"))
f = Fernet(key)
token = f.encrypt(msg)
print("Encrypted: " + token.decode("utf-8"))
msg = f.decrypt(token)
print("Decrypted: " + msg.decode("utf-8"))
|
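A property of Fernet worth keeping in mind alongside the demo: tokens are authenticated, so decrypting with the wrong key (or a tampered token) raises InvalidToken instead of returning garbage. A minimal sketch:

from cryptography.fernet import Fernet, InvalidToken

token = Fernet(Fernet.generate_key()).encrypt(b"secret")
try:
    Fernet(Fernet.generate_key()).decrypt(token)  # deliberately the wrong key
except InvalidToken:
    print("decryption rejected, as expected")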
|
c732496e054956898f414cf90b15b1fcf9b45b4f
|
pysovo/comms/comet.py
|
pysovo/comms/comet.py
|
# largely transplanted from comet-sendvo script:
# https://github.com/jdswinbank/Comet/blob/release-1.0/scripts/comet-sendvo
# Should track updates.
from __future__ import absolute_import
import logging
# Twisted
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
# VOEvent transport protocol
from comet.tcp.protocol import VOEventSenderFactory
# Encapsulation of event
from comet.log import log
from comet.utility.xml import xml_document
import lxml.etree as ElementTree
import voeparse
logger = logging.getLogger(__name__)
class OneShotSender(VOEventSenderFactory):
"""
A factory that shuts down the reactor when we lose the connection to the
remote host. That either means that our event has been sent or that we
failed.
"""
def clientConnectionLost(self, connector, reason):
reactor.stop()
def clientConnectionFailed(self, connector, reason):
logger.warning("Connection failed")
reactor.stop()
def send_voevent(voevent, host='localhost', port=8098):
voevent = xml_document(voeparse.dumps(voevent))
try:
factory = OneShotSender(voevent)
except IOError:
logger.warning("Reading XML document failed")
reactor.callWhenRunning(reactor.stop)
except ElementTree.Error:
logger.warning("Could not parse event text")
reactor.callWhenRunning(reactor.stop)
else:
reactor.connectTCP(host, port, factory)
reactor.run()
# If our factory didn't get an acknowledgement of receipt, we'll raise:
if locals().has_key("factory") and factory.ack:
return
else:
raise RuntimeError("send voevent failed")
|
Send VOEvents by direct use of the Comet module.
|
Send VOEvents by direct use of the Comet module.
This works, but a more 'decoupled' approach via command line might be
more sensible - more robust to internal Comet interface changes,
better direct testing against manual command line entries.
|
Python
|
bsd-2-clause
|
timstaley/pysovo
|
Send VOEvents by direct use of the Comet module.
This works, but a more 'decoupled' approach via command line might be
more sensible - more robust to internal Comet interface changes,
better direct testing against manual command line entries.
|
# largely transplanted from comet-sendvo script:
# https://github.com/jdswinbank/Comet/blob/release-1.0/scripts/comet-sendvo
# Should track updates.
from __future__ import absolute_import
import logging
# Twisted
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
# VOEvent transport protocol
from comet.tcp.protocol import VOEventSenderFactory
# Encapsulation of event
from comet.log import log
from comet.utility.xml import xml_document
import lxml.etree as ElementTree
import voeparse
logger = logging.getLogger(__name__)
class OneShotSender(VOEventSenderFactory):
"""
A factory that shuts down the reactor when we lose the connection to the
remote host. That either means that our event has been sent or that we
failed.
"""
def clientConnectionLost(self, connector, reason):
reactor.stop()
def clientConnectionFailed(self, connector, reason):
logger.warning("Connection failed")
reactor.stop()
def send_voevent(voevent, host='localhost', port=8098):
voevent = xml_document(voeparse.dumps(voevent))
try:
factory = OneShotSender(voevent)
except IOError:
logger.warning("Reading XML document failed")
reactor.callWhenRunning(reactor.stop)
except ElementTree.Error:
logger.warning("Could not parse event text")
reactor.callWhenRunning(reactor.stop)
else:
reactor.connectTCP(host, port, factory)
reactor.run()
# If our factory didn't get an acknowledgement of receipt, we'll raise:
if locals().has_key("factory") and factory.ack:
return
else:
raise RuntimeError("send voevent failed")
|
<commit_before><commit_msg>Send VOEvents by direct use of the Comet module.
This works, but a more 'decoupled' approach via command line might be
more sensible - more robust to internal Comet interface changes,
better direct testing against manual command line entries.<commit_after>
|
# largely transplanted from comet-sendvo script:
# https://github.com/jdswinbank/Comet/blob/release-1.0/scripts/comet-sendvo
# Should track updates.
from __future__ import absolute_import
import logging
# Twisted
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
# VOEvent transport protocol
from comet.tcp.protocol import VOEventSenderFactory
# Encapsulation of event
from comet.log import log
from comet.utility.xml import xml_document
import lxml.etree as ElementTree
import voeparse
logger = logging.getLogger(__name__)
class OneShotSender(VOEventSenderFactory):
"""
A factory that shuts down the reactor when we lose the connection to the
remote host. That either means that our event has been sent or that we
failed.
"""
def clientConnectionLost(self, connector, reason):
reactor.stop()
def clientConnectionFailed(self, connector, reason):
logger.warning("Connection failed")
reactor.stop()
def send_voevent(voevent, host='localhost', port=8098):
voevent = xml_document(voeparse.dumps(voevent))
try:
factory = OneShotSender(voevent)
except IOError:
logger.warning("Reading XML document failed")
reactor.callWhenRunning(reactor.stop)
except ElementTree.Error:
logger.warning("Could not parse event text")
reactor.callWhenRunning(reactor.stop)
else:
reactor.connectTCP(host, port, factory)
reactor.run()
# If our factory didn't get an acknowledgement of receipt, we'll raise:
if locals().has_key("factory") and factory.ack:
return
else:
raise RuntimeError("send voevent failed")
|
Send VOEvents by direct use of the Comet module.
This works, but a more 'decoupled' approach via command line might be
more sensible - more robust to internal Comet interface changes,
better direct testing against manual command line entries.# largely transplanted from comet-sendvo script:
# https://github.com/jdswinbank/Comet/blob/release-1.0/scripts/comet-sendvo
# Should track updates.
from __future__ import absolute_import
import logging
# Twisted
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
# VOEvent transport protocol
from comet.tcp.protocol import VOEventSenderFactory
# Encapsulation of event
from comet.log import log
from comet.utility.xml import xml_document
import lxml.etree as ElementTree
import voeparse
logger = logging.getLogger(__name__)
class OneShotSender(VOEventSenderFactory):
"""
A factory that shuts down the reactor when we lose the connection to the
remote host. That either means that our event has been sent or that we
failed.
"""
def clientConnectionLost(self, connector, reason):
reactor.stop()
def clientConnectionFailed(self, connector, reason):
logger.warning("Connection failed")
reactor.stop()
def send_voevent(voevent, host='localhost', port=8098):
voevent = xml_document(voeparse.dumps(voevent))
try:
factory = OneShotSender(voevent)
except IOError:
logger.warning("Reading XML document failed")
reactor.callWhenRunning(reactor.stop)
except ElementTree.Error:
logger.warning("Could not parse event text")
reactor.callWhenRunning(reactor.stop)
else:
reactor.connectTCP(host, port, factory)
reactor.run()
# If our factory didn't get an acknowledgement of receipt, we'll raise:
if locals().has_key("factory") and factory.ack:
return
else:
raise RuntimeError("send voevent failed")
|
<commit_before><commit_msg>Send VOEvents by direct use of the Comet module.
This works, but a more 'decoupled' approach via command line might be
more sensible - more robust to internal Comet interface changes,
better direct testing against manual command line entries.<commit_after># largely transplanted from comet-sendvo script:
# https://github.com/jdswinbank/Comet/blob/release-1.0/scripts/comet-sendvo
# Should track updates.
from __future__ import absolute_import
import logging
# Twisted
from twisted.python import usage
from twisted.internet import reactor
from twisted.internet.endpoints import clientFromString
# VOEvent transport protocol
from comet.tcp.protocol import VOEventSenderFactory
# Encapsulation of event
from comet.log import log
from comet.utility.xml import xml_document
import lxml.etree as ElementTree
import voeparse
logger = logging.getLogger(__name__)
class OneShotSender(VOEventSenderFactory):
"""
A factory that shuts down the reactor when we lose the connection to the
remote host. That either means that our event has been sent or that we
failed.
"""
def clientConnectionLost(self, connector, reason):
reactor.stop()
def clientConnectionFailed(self, connector, reason):
logger.warning("Connection failed")
reactor.stop()
def send_voevent(voevent, host='localhost', port=8098):
voevent = xml_document(voeparse.dumps(voevent))
try:
factory = OneShotSender(voevent)
except IOError:
logger.warning("Reading XML document failed")
reactor.callWhenRunning(reactor.stop)
except ElementTree.Error:
logger.warning("Could not parse event text")
reactor.callWhenRunning(reactor.stop)
else:
reactor.connectTCP(host, port, factory)
reactor.run()
# If our factory didn't get an acknowledgement of receipt, we'll raise:
if locals().has_key("factory") and factory.ack:
return
else:
raise RuntimeError("send voevent failed")
|
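The "decoupled" alternative mentioned in the commit message would shell out to Comet's own comet-sendvo script rather than importing its internals. A rough sketch of that shape is below; the option names and the use of stdin are assumptions about comet-sendvo's interface and should be checked against the installed version's --help output.

import subprocess
import voeparse

def send_voevent_via_cli(voevent, host='localhost', port=8098):
    # Option names and stdin behaviour are assumed, not verified against a specific Comet release.
    proc = subprocess.Popen(['comet-sendvo', '--host', str(host), '--port', str(port)],
                            stdin=subprocess.PIPE)
    proc.communicate(voeparse.dumps(voevent))
    if proc.returncode != 0:
        raise RuntimeError("comet-sendvo exited with code %d" % proc.returncode)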
|
ef38a5fea94b6e824b8df87fa8a3370767151317
|
migrations/versions/0033.py
|
migrations/versions/0033.py
|
"""empty message
Revision ID: 0033 drop tickets.old_event_id
Revises: 0032 orders,tickets,ticket_types
Create Date: 2019-09-25 01:01:37.092066
"""
# revision identifiers, used by Alembic.
revision = '0033 drop tickets.old_event_id'
down_revision = '0032 orders,tickets,ticket_types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tickets', 'old_event_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tickets', sa.Column('old_event_id', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
Update tickets to drop old_event_id
|
Update tickets to drop old_event_id
|
Python
|
mit
|
NewAcropolis/api,NewAcropolis/api,NewAcropolis/api
|
Update tickets to drop old_event_id
|
"""empty message
Revision ID: 0033 drop tickets.old_event_id
Revises: 0032 orders,tickets,ticket_types
Create Date: 2019-09-25 01:01:37.092066
"""
# revision identifiers, used by Alembic.
revision = '0033 drop tickets.old_event_id'
down_revision = '0032 orders,tickets,ticket_types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tickets', 'old_event_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tickets', sa.Column('old_event_id', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
<commit_before><commit_msg>Update tickets to drop old_event_id<commit_after>
|
"""empty message
Revision ID: 0033 drop tickets.old_event_id
Revises: 0032 orders,tickets,ticket_types
Create Date: 2019-09-25 01:01:37.092066
"""
# revision identifiers, used by Alembic.
revision = '0033 drop tickets.old_event_id'
down_revision = '0032 orders,tickets,ticket_types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tickets', 'old_event_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tickets', sa.Column('old_event_id', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
Update tickets to drop old_event_id"""empty message
Revision ID: 0033 drop tickets.old_event_id
Revises: 0032 orders,tickets,ticket_types
Create Date: 2019-09-25 01:01:37.092066
"""
# revision identifiers, used by Alembic.
revision = '0033 drop tickets.old_event_id'
down_revision = '0032 orders,tickets,ticket_types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tickets', 'old_event_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tickets', sa.Column('old_event_id', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
<commit_before><commit_msg>Update tickets to drop old_event_id<commit_after>"""empty message
Revision ID: 0033 drop tickets.old_event_id
Revises: 0032 orders,tickets,ticket_types
Create Date: 2019-09-25 01:01:37.092066
"""
# revision identifiers, used by Alembic.
revision = '0033 drop tickets.old_event_id'
down_revision = '0032 orders,tickets,ticket_types'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('tickets', 'old_event_id')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('tickets', sa.Column('old_event_id', sa.INTEGER(), autoincrement=False, nullable=True))
# ### end Alembic commands ###
|
|
c8b8f7897bd4eb26f65480f90e0f6d71394f8971
|
sendcmd.py
|
sendcmd.py
|
#!/usr/bin/env python
import sys
import getmetric
def main():
output = getmetric.sshcmd(sys.argv[1], sys.argv[2])
print output
if __name__ == '__main__':
sys.exit(main())
|
Add utility for sending commands for testing
|
Add utility for sending commands for testing
|
Python
|
bsd-3-clause
|
ekollof/pymetrics
|
Add utility for sending commands for testing
|
#!/usr/bin/env python
import sys
import getmetric
def main():
output = getmetric.sshcmd(sys.argv[1], sys.argv[2])
print output
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add utility for sending commands for testing<commit_after>
|
#!/usr/bin/env python
import sys
import getmetric
def main():
output = getmetric.sshcmd(sys.argv[1], sys.argv[2])
print output
if __name__ == '__main__':
sys.exit(main())
|
Add utility for sending commands for testing#!/usr/bin/env python
import sys
import getmetric
def main():
output = getmetric.sshcmd(sys.argv[1], sys.argv[2])
print output
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add utility for sending commands for testing<commit_after>#!/usr/bin/env python
import sys
import getmetric
def main():
output = getmetric.sshcmd(sys.argv[1], sys.argv[2])
print output
if __name__ == '__main__':
sys.exit(main())
|
|
761ae0d762324ef1eba93ab1b9cf2cf28d2fa30e
|
python/snippets/find_if_program_installed.py
|
python/snippets/find_if_program_installed.py
|
def which(program):
"""From:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
print(which('xixi'))
|
Add snippet for python -> find if program is installed in system
|
Add snippet for python -> find if program is installed in system
|
Python
|
mit
|
thescouser89/snippets,thescouser89/snippets,thescouser89/snippets,thescouser89/snippets,thescouser89/snippets,thescouser89/snippets
|
Add snippet for python -> find if program is installed in system
|
def which(program):
"""From:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
print(which('xixi'))
|
<commit_before><commit_msg>Add snippet for python -> find if program is installed in system<commit_after>
|
def which(program):
"""From:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
print(which('xixi'))
|
Add snippet for python -> find if program is installed in systemdef which(program):
"""From:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
print(which('xixi'))
|
<commit_before><commit_msg>Add snippet for python -> find if program is installed in system<commit_after>def which(program):
"""From:
http://stackoverflow.com/questions/377017/test-if-executable-exists-in-python"""
import os
def is_exe(fpath):
return os.path.isfile(fpath) and os.access(fpath, os.X_OK)
fpath, fname = os.path.split(program)
if fpath:
if is_exe(program):
return program
else:
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, program)
if is_exe(exe_file):
return exe_file
return None
print(which('xixi'))
|
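For reference, Python 3.3+ ships the same behaviour in the standard library as shutil.which, which likewise returns the full path or None:

import shutil

print(shutil.which('xixi'))  # None unless an executable named 'xixi' is on PATH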
|
d24da0e339a0470b94fe79016a5343755640ba0f
|
deploy_prebuilt.py
|
deploy_prebuilt.py
|
#!/usr/bin/env python
import os
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
chromium_dir = os.path.abspath(os.path.join(script_dir, 'src'))
# Solution root directory.
root_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
base_libs = [
'base',
'base_i18n',
'base_prefs',
'base_static',
# Dependencies.
'dynamic_annotations',
'event',
'icudata',
'icui18n',
'icuuc',
'modp_b64',
'allocator_extension_thunks',
]
net_libs = [
'net',
]
libs = {
'base': base_libs,
'net': net_libs,
}
deploy_dir = os.path.join(script_dir, 'prebuilt')
ios_libs_dir = os.path.join(chromium_dir, 'xcodebuild', 'Debug-iphoneos')
def Copy(libs, to_path):
    # Create dir if it doesn't exist.
if not os.path.exists(to_path):
os.makedirs(to_path)
for item in libs:
shutil.copy(item, to_path)
print 'Deploy', item
def GetLibs(dir, libs):
items = []
for item in libs:
lib = 'lib' + item + '.a'
items.append(os.path.join(dir, lib))
return items
def Deploy(module):
if os.path.exists(ios_libs_dir):
dir = os.path.join(deploy_dir, 'ios', 'armv7', module)
Copy(GetLibs(ios_libs_dir, libs[module]), dir)
for module in libs:
Deploy(module)
|
Add prebuilt libraries deployment helper script.
|
Add prebuilt libraries deployment helper script.
|
Python
|
mit
|
cybertk/libchromium,cybertk/libchromium,cybertk/libchromium,cybertk/libchromium
|
Add prebuilt libraries deployment helper script.
|
#!/usr/bin/env python
import os
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
chromium_dir = os.path.abspath(os.path.join(script_dir, 'src'))
# Solution root directory.
root_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
base_libs = [
'base',
'base_i18n',
'base_prefs',
'base_static',
# Dependencies.
'dynamic_annotations',
'event',
'icudata',
'icui18n',
'icuuc',
'modp_b64',
'allocator_extension_thunks',
]
net_libs = [
'net',
]
libs = {
'base': base_libs,
'net': net_libs,
}
deploy_dir = os.path.join(script_dir, 'prebuilt')
ios_libs_dir = os.path.join(chromium_dir, 'xcodebuild', 'Debug-iphoneos')
def Copy(libs, to_path):
    # Create dir if it doesn't exist.
if not os.path.exists(to_path):
os.makedirs(to_path)
for item in libs:
shutil.copy(item, to_path)
print 'Deploy', item
def GetLibs(dir, libs):
items = []
for item in libs:
lib = 'lib' + item + '.a'
items.append(os.path.join(dir, lib))
return items
def Deploy(module):
if os.path.exists(ios_libs_dir):
dir = os.path.join(deploy_dir, 'ios', 'armv7', module)
Copy(GetLibs(ios_libs_dir, libs[module]), dir)
for module in libs:
Deploy(module)
|
<commit_before><commit_msg>Add prebuilt libraries deployment helper script.<commit_after>
|
#!/usr/bin/env python
import os
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
chromium_dir = os.path.abspath(os.path.join(script_dir, 'src'))
# Solution root directory.
root_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
base_libs = [
'base',
'base_i18n',
'base_prefs',
'base_static',
# Dependencies.
'dynamic_annotations',
'event',
'icudata',
'icui18n',
'icuuc',
'modp_b64',
'allocator_extension_thunks',
]
net_libs = [
'net',
]
libs = {
'base': base_libs,
'net': net_libs,
}
deploy_dir = os.path.join(script_dir, 'prebuilt')
ios_libs_dir = os.path.join(chromium_dir, 'xcodebuild', 'Debug-iphoneos')
def Copy(libs, to_path):
    # Create dir if it doesn't exist.
if not os.path.exists(to_path):
os.makedirs(to_path)
for item in libs:
shutil.copy(item, to_path)
print 'Deploy', item
def GetLibs(dir, libs):
items = []
for item in libs:
lib = 'lib' + item + '.a'
items.append(os.path.join(dir, lib))
return items
def Deploy(module):
if os.path.exists(ios_libs_dir):
dir = os.path.join(deploy_dir, 'ios', 'armv7', module)
Copy(GetLibs(ios_libs_dir, libs[module]), dir)
for module in libs:
Deploy(module)
|
Add prebuilt libraries deployment helper script.#!/usr/bin/env python
import os
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
chromium_dir = os.path.abspath(os.path.join(script_dir, 'src'))
# Solution root directory.
root_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
base_libs = [
'base',
'base_i18n',
'base_prefs',
'base_static',
# Dependencies.
'dynamic_annotations',
'event',
'icudata',
'icui18n',
'icuuc',
'modp_b64',
'allocator_extension_thunks',
]
net_libs = [
'net',
]
libs = {
'base': base_libs,
'net': net_libs,
}
deploy_dir = os.path.join(script_dir, 'prebuilt')
ios_libs_dir = os.path.join(chromium_dir, 'xcodebuild', 'Debug-iphoneos')
def Copy(libs, to_path):
    # Create dir if it doesn't exist.
if not os.path.exists(to_path):
os.makedirs(to_path)
for item in libs:
shutil.copy(item, to_path)
print 'Deploy', item
def GetLibs(dir, libs):
items = []
for item in libs:
lib = 'lib' + item + '.a'
items.append(os.path.join(dir, lib))
return items
def Deploy(module):
if os.path.exists(ios_libs_dir):
dir = os.path.join(deploy_dir, 'ios', 'armv7', module)
Copy(GetLibs(ios_libs_dir, libs[module]), dir)
for module in libs:
Deploy(module)
|
<commit_before><commit_msg>Add prebuilt libraries deployment helper script.<commit_after>#!/usr/bin/env python
import os
import shutil
script_dir = os.path.dirname(os.path.realpath(__file__))
chromium_dir = os.path.abspath(os.path.join(script_dir, 'src'))
# Solution root directory.
root_dir = os.path.abspath(os.path.join(script_dir, os.pardir))
base_libs = [
'base',
'base_i18n',
'base_prefs',
'base_static',
# Dependencies.
'dynamic_annotations',
'event',
'icudata',
'icui18n',
'icuuc',
'modp_b64',
'allocator_extension_thunks',
]
net_libs = [
'net',
]
libs = {
'base': base_libs,
'net': net_libs,
}
deploy_dir = os.path.join(script_dir, 'prebuilt')
ios_libs_dir = os.path.join(chromium_dir, 'xcodebuild', 'Debug-iphoneos')
def Copy(libs, to_path):
    # Create dir if it doesn't exist.
if not os.path.exists(to_path):
os.makedirs(to_path)
for item in libs:
shutil.copy(item, to_path)
print 'Deploy', item
def GetLibs(dir, libs):
items = []
for item in libs:
lib = 'lib' + item + '.a'
items.append(os.path.join(dir, lib))
return items
def Deploy(module):
if os.path.exists(ios_libs_dir):
dir = os.path.join(deploy_dir, 'ios', 'armv7', module)
Copy(GetLibs(ios_libs_dir, libs[module]), dir)
for module in libs:
Deploy(module)
|
|
cedd86b6ad54319ad44a961bc51c13f78e209c76
|
backend/globaleaks/tests/jobs/test_base.py
|
backend/globaleaks/tests/jobs/test_base.py
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import base
class TestGLJob(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def test_base_scheduler(self):
yield base.GLJob()._operation()
|
Implement unit testing of the schedulers base class
|
Implement unit testing of the schedulers base class
|
Python
|
agpl-3.0
|
vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks
|
Implement unit testing of the schedulers base class
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import base
class TestGLJob(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def test_base_scheduler(self):
yield base.GLJob()._operation()
|
<commit_before><commit_msg>Implement unit testing of the schedulers base class<commit_after>
|
# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import base
class TestGLJob(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def test_base_scheduler(self):
yield base.GLJob()._operation()
|
Implement unit testing of the schedulers base class# -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import base
class TestGLJob(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def test_base_scheduler(self):
yield base.GLJob()._operation()
|
<commit_before><commit_msg>Implement unit testing of the schedulers base class<commit_after># -*- coding: utf-8 -*-
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.jobs import base
class TestGLJob(helpers.TestGLWithPopulatedDB):
@inlineCallbacks
def test_base_scheduler(self):
yield base.GLJob()._operation()
|
|
815a9c802440375cc283179c15d3b1a371863418
|
tests/test_class_based.py
|
tests/test_class_based.py
|
"""tests/test_decorators.py.
Tests that class based hug routes interact as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_simple_class_based_view():
'''Test creating class based routers'''
@hug.classy.urls('/endpoint', requires=())
class MyClass(object):
@hug.classy.get()
def my_method(self):
return 'hi there!'
@hug.classy.post()
def my_method_two(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_simple_class_based_method_view():
'''Test creating class based routers using method mappings'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_routing_class_based_method_view_with_sub_routing():
'''Test creating class based routers using method mappings, then overriding url on sub method'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
@hug.classy.urls('/home/')
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'home').data == 'bye'
def test_routing_instance():
    '''Test to ensure it's possible to route a class after it is instantiated'''
class EndPoint(object):
@hug.classy
def one(self):
return 'one'
@hug.classy
def two(self):
return 2
hug.classy.get()(EndPoint())
assert hug.test.get(api, 'one').data == 'one'
assert hug.test.get(api, 'two').data == 2
|
Add test for desired support of class based routers
|
Add test for desired support of class based routers
|
Python
|
mit
|
timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug,timothycrosley/hug
|
Add test for desired support of class based routers
|
"""tests/test_decorators.py.
Tests that class based hug routes interact as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_simple_class_based_view():
'''Test creating class based routers'''
@hug.classy.urls('/endpoint', requires=())
class MyClass(object):
@hug.classy.get()
def my_method(self):
return 'hi there!'
@hug.classy.post()
def my_method_two(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_simple_class_based_method_view():
'''Test creating class based routers using method mappings'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_routing_class_based_method_view_with_sub_routing():
'''Test creating class based routers using method mappings, then overriding url on sub method'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
@hug.classy.urls('/home/')
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'home').data == 'bye'
def test_routing_instance():
    '''Test to ensure it's possible to route a class after it is instantiated'''
class EndPoint(object):
@hug.classy
def one(self):
return 'one'
@hug.classy
def two(self):
return 2
hug.classy.get()(EndPoint())
assert hug.test.get(api, 'one').data == 'one'
assert hug.test.get(api, 'two').data == 2
|
<commit_before><commit_msg>Add test for desired support of class based routers<commit_after>
|
"""tests/test_decorators.py.
Tests that class based hug routes interact as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_simple_class_based_view():
'''Test creating class based routers'''
@hug.classy.urls('/endpoint', requires=())
class MyClass(object):
@hug.classy.get()
def my_method(self):
return 'hi there!'
@hug.classy.post()
def my_method_two(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_simple_class_based_method_view():
'''Test creating class based routers using method mappings'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_routing_class_based_method_view_with_sub_routing():
'''Test creating class based routers using method mappings, then overriding url on sub method'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
@hug.classy.urls('/home/')
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'home').data == 'bye'
def test_routing_instance():
    '''Test to ensure it's possible to route a class after it is instantiated'''
class EndPoint(object):
@hug.classy
def one(self):
return 'one'
@hug.classy
def two(self):
return 2
hug.classy.get()(EndPoint())
assert hug.test.get(api, 'one').data == 'one'
assert hug.test.get(api, 'two').data == 2
|
Add test for desired support of class based routers"""tests/test_decorators.py.
Tests that class based hug routes interact as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_simple_class_based_view():
'''Test creating class based routers'''
@hug.classy.urls('/endpoint', requires=())
class MyClass(object):
@hug.classy.get()
def my_method(self):
return 'hi there!'
@hug.classy.post()
def my_method_two(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_simple_class_based_method_view():
'''Test creating class based routers using method mappings'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_routing_class_based_method_view_with_sub_routing():
'''Test creating class based routers using method mappings, then overriding url on sub method'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
@hug.classy.urls('/home/')
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'home').data == 'bye'
def test_routing_instance():
    '''Test to ensure it's possible to route a class after it is instantiated'''
class EndPoint(object):
@hug.classy
def one(self):
return 'one'
@hug.classy
def two(self):
return 2
hug.classy.get()(EndPoint())
assert hug.test.get(api, 'one').data == 'one'
assert hug.test.get(api, 'two').data == 2
|
<commit_before><commit_msg>Add test for desired support of class based routers<commit_after>"""tests/test_decorators.py.
Tests that class based hug routes interact as expected
Copyright (C) 2015 Timothy Edmund Crosley
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and
to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
import sys
import hug
api = sys.modules[__name__]
def test_simple_class_based_view():
'''Test creating class based routers'''
@hug.classy.urls('/endpoint', requires=())
class MyClass(object):
@hug.classy.get()
def my_method(self):
return 'hi there!'
@hug.classy.post()
def my_method_two(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_simple_class_based_method_view():
'''Test creating class based routers using method mappings'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'endpoint').data == 'bye'
def test_routing_class_based_method_view_with_sub_routing():
'''Test creating class based routers using method mappings, then overriding url on sub method'''
@hug.classy.auto_http_methods()
class EndPoint(object):
def get(self):
return 'hi there!'
@hug.classy.urls('/home/')
def post(self):
return 'bye'
assert hug.test.get(api, 'endpoint').data == 'hi there!'
assert hug.test.post(api, 'home').data == 'bye'
def test_routing_instance():
'''Test to ensure it's possible to route a class after it is instantiated'''
class EndPoint(object):
@hug.classy
def one(self):
return 'one'
@hug.classy
def two(self):
return 2
hug.classy.get()(EndPoint())
assert hug.test.get(api, 'one').data == 'one'
assert hug.test.get(api, 'two').data == 2
|
|
1a8fea9c752845247c592f0a0bd6ffd8e8f259e2
|
eodatasets/__main__.py
|
eodatasets/__main__.py
|
import click
import os
from pathlib import Path
import logging
from eodatasets.package import package_ortho, package_nbar, package_raw, get_dataset
_DATASET_PACKAGERS = {
'ortho': package_ortho,
'nbar': package_nbar,
'raw': package_raw
}
@click.command()
@click.option('--ancestor', type=click.Path(exists=True, readable=True, writable=False), multiple=True)
@click.option('--debug', is_flag=True)
@click.argument('type', type=click.Choice(_DATASET_PACKAGERS.keys()))
@click.argument('dataset', type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@click.argument('destination', type=click.Path(exists=True, readable=True, writable=True), nargs=1)
def run_packaging(ancestor, debug, type, dataset, destination):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger('eodatasets').setLevel(logging.INFO)
ancestor_datasets = {}
# TODO: detect actual ancestor types.
if ancestor:
ancestor_datasets.update({'raw': get_dataset(Path(ancestor[0]))})
for dataset_path in dataset:
destination = os.path.join(destination, type)
if not os.path.exists(destination):
os.mkdir(destination)
_DATASET_PACKAGERS[type](
dataset_path,
destination,
source_datasets=ancestor_datasets
)
run_packaging()
|
Add simple initial command line interface.
|
Add simple initial command line interface.
|
Python
|
apache-2.0
|
jeremyh/eo-datasets,GeoscienceAustralia/eo-datasets,GeoscienceAustralia/eo-datasets,jeremyh/eo-datasets
|
Add simple initial command line interface.
|
import click
import os
from pathlib import Path
import logging
from eodatasets.package import package_ortho, package_nbar, package_raw, get_dataset
_DATASET_PACKAGERS = {
'ortho': package_ortho,
'nbar': package_nbar,
'raw': package_raw
}
@click.command()
@click.option('--ancestor', type=click.Path(exists=True, readable=True, writable=False), multiple=True)
@click.option('--debug', is_flag=True)
@click.argument('type', type=click.Choice(_DATASET_PACKAGERS.keys()))
@click.argument('dataset', type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@click.argument('destination', type=click.Path(exists=True, readable=True, writable=True), nargs=1)
def run_packaging(ancestor, debug, type, dataset, destination):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger('eodatasets').setLevel(logging.INFO)
ancestor_datasets = {}
# TODO: detect actual ancestor types.
if ancestor:
ancestor_datasets.update({'raw': get_dataset(Path(ancestor[0]))})
for dataset_path in dataset:
destination = os.path.join(destination, type)
if not os.path.exists(destination):
os.mkdir(destination)
_DATASET_PACKAGERS[type](
dataset_path,
destination,
source_datasets=ancestor_datasets
)
run_packaging()
|
<commit_before><commit_msg>Add simple initial command line interface.<commit_after>
|
import click
import os
from pathlib import Path
import logging
from eodatasets.package import package_ortho, package_nbar, package_raw, get_dataset
_DATASET_PACKAGERS = {
'ortho': package_ortho,
'nbar': package_nbar,
'raw': package_raw
}
@click.command()
@click.option('--ancestor', type=click.Path(exists=True, readable=True, writable=False), multiple=True)
@click.option('--debug', is_flag=True)
@click.argument('type', type=click.Choice(_DATASET_PACKAGERS.keys()))
@click.argument('dataset', type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@click.argument('destination', type=click.Path(exists=True, readable=True, writable=True), nargs=1)
def run_packaging(ancestor, debug, type, dataset, destination):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger('eodatasets').setLevel(logging.INFO)
ancestor_datasets = {}
# TODO: detect actual ancestor types.
if ancestor:
ancestor_datasets.update({'raw': get_dataset(Path(ancestor[0]))})
for dataset_path in dataset:
destination = os.path.join(destination, type)
if not os.path.exists(destination):
os.mkdir(destination)
_DATASET_PACKAGERS[type](
dataset_path,
destination,
source_datasets=ancestor_datasets
)
run_packaging()
|
Add simple initial command line interface.import click
import os
from pathlib import Path
import logging
from eodatasets.package import package_ortho, package_nbar, package_raw, get_dataset
_DATASET_PACKAGERS = {
'ortho': package_ortho,
'nbar': package_nbar,
'raw': package_raw
}
@click.command()
@click.option('--ancestor', type=click.Path(exists=True, readable=True, writable=False), multiple=True)
@click.option('--debug', is_flag=True)
@click.argument('type', type=click.Choice(_DATASET_PACKAGERS.keys()))
@click.argument('dataset', type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@click.argument('destination', type=click.Path(exists=True, readable=True, writable=True), nargs=1)
def run_packaging(ancestor, debug, type, dataset, destination):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger('eodatasets').setLevel(logging.INFO)
ancestor_datasets = {}
# TODO: detect actual ancestor types.
if ancestor:
ancestor_datasets.update({'raw': get_dataset(Path(ancestor[0]))})
for dataset_path in dataset:
destination = os.path.join(destination, type)
if not os.path.exists(destination):
os.mkdir(destination)
_DATASET_PACKAGERS[type](
dataset_path,
destination,
source_datasets=ancestor_datasets
)
run_packaging()
|
<commit_before><commit_msg>Add simple initial command line interface.<commit_after>import click
import os
from pathlib import Path
import logging
from eodatasets.package import package_ortho, package_nbar, package_raw, get_dataset
_DATASET_PACKAGERS = {
'ortho': package_ortho,
'nbar': package_nbar,
'raw': package_raw
}
@click.command()
@click.option('--ancestor', type=click.Path(exists=True, readable=True, writable=False), multiple=True)
@click.option('--debug', is_flag=True)
@click.argument('type', type=click.Choice(_DATASET_PACKAGERS.keys()))
@click.argument('dataset', type=click.Path(exists=True, readable=True, writable=False), nargs=-1)
@click.argument('destination', type=click.Path(exists=True, readable=True, writable=True), nargs=1)
def run_packaging(ancestor, debug, type, dataset, destination):
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s')
if debug:
logging.getLogger().setLevel(logging.DEBUG)
else:
logging.getLogger('eodatasets').setLevel(logging.INFO)
ancestor_datasets = {}
# TODO: detect actual ancestor types.
if ancestor:
ancestor_datasets.update({'raw': get_dataset(Path(ancestor[0]))})
for dataset_path in dataset:
destination = os.path.join(destination, type)
if not os.path.exists(destination):
os.mkdir(destination)
_DATASET_PACKAGERS[type](
dataset_path,
destination,
source_datasets=ancestor_datasets
)
run_packaging()
|
|
e3d90957c4fa78a85bb250a6ec82eff43ec5be7d
|
tests/test_losses.py
|
tests/test_losses.py
|
import keras_retinanet.losses
import keras
import numpy as np
import pytest
def test_smooth_l1():
regression = np.array([
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
], dtype=keras.backend.floatx())
regression = keras.backend.variable(regression)
regression_target = np.array([
[
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 0.05, 0, 1],
[0, 0, 1, 0, 0],
]
], dtype=keras.backend.floatx())
regression_target = keras.backend.variable(regression_target)
loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
loss = keras.backend.eval(loss)
assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
|
Add unit test for smooth_l1.
|
Add unit test for smooth_l1.
|
Python
|
apache-2.0
|
delftrobotics/keras-retinanet
|
Add unit test for smooth_l1.
|
import keras_retinanet.losses
import keras
import numpy as np
import pytest
def test_smooth_l1():
regression = np.array([
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
], dtype=keras.backend.floatx())
regression = keras.backend.variable(regression)
regression_target = np.array([
[
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 0.05, 0, 1],
[0, 0, 1, 0, 0],
]
], dtype=keras.backend.floatx())
regression_target = keras.backend.variable(regression_target)
loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
loss = keras.backend.eval(loss)
assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
|
<commit_before><commit_msg>Add unit test for smooth_l1.<commit_after>
|
import keras_retinanet.losses
import keras
import numpy as np
import pytest
def test_smooth_l1():
regression = np.array([
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
], dtype=keras.backend.floatx())
regression = keras.backend.variable(regression)
regression_target = np.array([
[
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 0.05, 0, 1],
[0, 0, 1, 0, 0],
]
], dtype=keras.backend.floatx())
regression_target = keras.backend.variable(regression_target)
loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
loss = keras.backend.eval(loss)
assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
|
Add unit test for smooth_l1.import keras_retinanet.losses
import keras
import numpy as np
import pytest
def test_smooth_l1():
regression = np.array([
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
], dtype=keras.backend.floatx())
regression = keras.backend.variable(regression)
regression_target = np.array([
[
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 0.05, 0, 1],
[0, 0, 1, 0, 0],
]
], dtype=keras.backend.floatx())
regression_target = keras.backend.variable(regression_target)
loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
loss = keras.backend.eval(loss)
assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
|
<commit_before><commit_msg>Add unit test for smooth_l1.<commit_after>import keras_retinanet.losses
import keras
import numpy as np
import pytest
def test_smooth_l1():
regression = np.array([
[
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0],
]
], dtype=keras.backend.floatx())
regression = keras.backend.variable(regression)
regression_target = np.array([
[
[0, 0, 0, 1, 1],
[0, 0, 1, 0, 1],
[0, 0, 0.05, 0, 1],
[0, 0, 1, 0, 0],
]
], dtype=keras.backend.floatx())
regression_target = keras.backend.variable(regression_target)
loss = keras_retinanet.losses.smooth_l1()(regression_target, regression)
loss = keras.backend.eval(loss)
assert loss == pytest.approx((((1 - 0.5 / 9) * 2 + (0.5 * 9 * 0.05 ** 2)) / 3))
|
|
ea0f2a6566ed6d4770d6f5f5b59550c54579a6b8
|
tests/test_others.py
|
tests/test_others.py
|
from typing import Optional
import typer
from typer.main import solve_typer_info_defaults, solve_typer_info_help
from typer.models import TyperInfo
from typer.testing import CliRunner
runner = CliRunner()
def test_optional():
app = typer.Typer()
@app.command()
def opt(user: Optional[str] = None):
if user:
typer.echo(f"User: {user}")
else:
typer.echo("No user")
result = runner.invoke(app)
assert result.exit_code == 0
assert "No user" in result.output
result = runner.invoke(app, ["--user", "Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_no_type():
app = typer.Typer()
@app.command()
def no_type(user):
typer.echo(f"User: {user}")
result = runner.invoke(app, ["Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_help_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_help(TyperInfo())
assert value is None
def test_defaults_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_defaults(TyperInfo())
assert value
|
Add extra tests for edge cases that don't belong on docs
|
:white_check_mark: Add extra tests for edge cases that don't belong on docs
|
Python
|
mit
|
tiangolo/typer,tiangolo/typer
|
:white_check_mark: Add extra tests for edge cases that don't belong on docs
|
from typing import Optional
import typer
from typer.main import solve_typer_info_defaults, solve_typer_info_help
from typer.models import TyperInfo
from typer.testing import CliRunner
runner = CliRunner()
def test_optional():
app = typer.Typer()
@app.command()
def opt(user: Optional[str] = None):
if user:
typer.echo(f"User: {user}")
else:
typer.echo("No user")
result = runner.invoke(app)
assert result.exit_code == 0
assert "No user" in result.output
result = runner.invoke(app, ["--user", "Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_no_type():
app = typer.Typer()
@app.command()
def no_type(user):
typer.echo(f"User: {user}")
result = runner.invoke(app, ["Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_help_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_help(TyperInfo())
assert value is None
def test_defaults_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_defaults(TyperInfo())
assert value
|
<commit_before><commit_msg>:white_check_mark: Add extra tests for edge cases that don't belong on docs<commit_after>
|
from typing import Optional
import typer
from typer.main import solve_typer_info_defaults, solve_typer_info_help
from typer.models import TyperInfo
from typer.testing import CliRunner
runner = CliRunner()
def test_optional():
app = typer.Typer()
@app.command()
def opt(user: Optional[str] = None):
if user:
typer.echo(f"User: {user}")
else:
typer.echo("No user")
result = runner.invoke(app)
assert result.exit_code == 0
assert "No user" in result.output
result = runner.invoke(app, ["--user", "Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_no_type():
app = typer.Typer()
@app.command()
def no_type(user):
typer.echo(f"User: {user}")
result = runner.invoke(app, ["Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_help_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_help(TyperInfo())
assert value is None
def test_defaults_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_defaults(TyperInfo())
assert value
|
:white_check_mark: Add extra tests for edge cases that don't belong on docsfrom typing import Optional
import typer
from typer.main import solve_typer_info_defaults, solve_typer_info_help
from typer.models import TyperInfo
from typer.testing import CliRunner
runner = CliRunner()
def test_optional():
app = typer.Typer()
@app.command()
def opt(user: Optional[str] = None):
if user:
typer.echo(f"User: {user}")
else:
typer.echo("No user")
result = runner.invoke(app)
assert result.exit_code == 0
assert "No user" in result.output
result = runner.invoke(app, ["--user", "Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_no_type():
app = typer.Typer()
@app.command()
def no_type(user):
typer.echo(f"User: {user}")
result = runner.invoke(app, ["Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_help_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_help(TyperInfo())
assert value is None
def test_defaults_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_defaults(TyperInfo())
assert value
|
<commit_before><commit_msg>:white_check_mark: Add extra tests for edge cases that don't belong on docs<commit_after>from typing import Optional
import typer
from typer.main import solve_typer_info_defaults, solve_typer_info_help
from typer.models import TyperInfo
from typer.testing import CliRunner
runner = CliRunner()
def test_optional():
app = typer.Typer()
@app.command()
def opt(user: Optional[str] = None):
if user:
typer.echo(f"User: {user}")
else:
typer.echo("No user")
result = runner.invoke(app)
assert result.exit_code == 0
assert "No user" in result.output
result = runner.invoke(app, ["--user", "Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_no_type():
app = typer.Typer()
@app.command()
def no_type(user):
typer.echo(f"User: {user}")
result = runner.invoke(app, ["Camila"])
assert result.exit_code == 0
assert "User: Camila" in result.output
def test_help_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_help(TyperInfo())
assert value is None
def test_defaults_from_info():
# Mainly for coverage/completeness
value = solve_typer_info_defaults(TyperInfo())
assert value
|
|
c97d77f058c73e5c8da4c108681870ff8f0abd71
|
examples/no-minimum.py
|
examples/no-minimum.py
|
from simplex.algorithm import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Define objective function
def objective(xs):
x1, x2 = xs[0], xs[1]
return x1*x2
# Define callback function
simplices = []
def callback(args):
simplices.append(args[0])
# Initial simplex
simplex = np.array([[0,0], [1,1], [0,1]], dtype=np.float)
# Initialise NelderMead simplex algorithm
nm = NelderMeadSimplex(objective, simplex, epsilon=1e-6, callback=callback)
# Minimise the objective function
solution = nm.solve()
print("Minimum at {}".format(solution))
# Tabulate objective function
x = np.linspace(-3, 3, 1000)
y = np.linspace(-3, 3, 1000)
X, Y = np.meshgrid(x, y)
Z = X*Y
# Plot function contours together with the evolution of
# the simplices as they approach the minimum
fig = plt.figure()
cs = plt.contour(X, Y, Z, 20)
plt.clabel(cs, inline=1, fontsize=10)
for simplex in simplices:
lines = []
for i in range(3):
for j in range(i, 3):
if j == i:
continue
plt.plot(*zip(simplex[i], simplex[j]), c='black')
plt.xlabel(r"$$x_1$$")
plt.ylabel(r"$$x_2$$")
plt.grid()
plt.savefig('no-minimum.png', bbox_inches=0, dpi=80)
|
Add example with no minimum.
|
Add example with no minimum.
|
Python
|
mit
|
kubkon/simplex
|
Add example with no minimum.
|
from simplex.algorithm import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Define objective function
def objective(xs):
x1, x2 = xs[0], xs[1]
return x1*x2
# Define callback function
simplices = []
def callback(args):
simplices.append(args[0])
# Initial simplex
simplex = np.array([[0,0], [1,1], [0,1]], dtype=np.float)
# Initialise NelderMead simplex algorithm
nm = NelderMeadSimplex(objective, simplex, epsilon=1e-6, callback=callback)
# Minimise the objective function
solution = nm.solve()
print("Minimum at {}".format(solution))
# Tabulate objective function
x = np.linspace(-3, 3, 1000)
y = np.linspace(-3, 3, 1000)
X, Y = np.meshgrid(x, y)
Z = X*Y
# Plot function contours together with the evolution of
# the simplices as they approach the minimum
fig = plt.figure()
cs = plt.contour(X, Y, Z, 20)
plt.clabel(cs, inline=1, fontsize=10)
for simplex in simplices:
lines = []
for i in range(3):
for j in range(i, 3):
if j == i:
continue
plt.plot(*zip(simplex[i], simplex[j]), c='black')
plt.xlabel(r"$$x_1$$")
plt.ylabel(r"$$x_2$$")
plt.grid()
plt.savefig('no-minimum.png', bbox_inches=0, dpi=80)
|
<commit_before><commit_msg>Add example with no minimum.<commit_after>
|
from simplex.algorithm import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Define objective function
def objective(xs):
x1, x2 = xs[0], xs[1]
return x1*x2
# Define callback function
simplices = []
def callback(args):
simplices.append(args[0])
# Initial simplex
simplex = np.array([[0,0], [1,1], [0,1]], dtype=np.float)
# Initialise NelderMead simplex algorithm
nm = NelderMeadSimplex(objective, simplex, epsilon=1e-6, callback=callback)
# Minimise the objective function
solution = nm.solve()
print("Minimum at {}".format(solution))
# Tabulate objective function
x = np.linspace(-3, 3, 1000)
y = np.linspace(-3, 3, 1000)
X, Y = np.meshgrid(x, y)
Z = X*Y
# Plot function contours together with the evolution of
# the simplices as they approach the minimum
fig = plt.figure()
cs = plt.contour(X, Y, Z, 20)
plt.clabel(cs, inline=1, fontsize=10)
for simplex in simplices:
lines = []
for i in range(3):
for j in range(i, 3):
if j == i:
continue
plt.plot(*zip(simplex[i], simplex[j]), c='black')
plt.xlabel(r"$$x_1$$")
plt.ylabel(r"$$x_2$$")
plt.grid()
plt.savefig('no-minimum.png', bbox_inches=0, dpi=80)
|
Add example with no minimum.from simplex.algorithm import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Define objective function
def objective(xs):
x1, x2 = xs[0], xs[1]
return x1*x2
# Define callback function
simplices = []
def callback(args):
simplices.append(args[0])
# Initial simplex
simplex = np.array([[0,0], [1,1], [0,1]], dtype=np.float)
# Initialise NelderMead simplex algorithm
nm = NelderMeadSimplex(objective, simplex, epsilon=1e-6, callback=callback)
# Minimise the objective function
solution = nm.solve()
print("Minimum at {}".format(solution))
# Tabulate objective function
x = np.linspace(-3, 3, 1000)
y = np.linspace(-3, 3, 1000)
X, Y = np.meshgrid(x, y)
Z = X*Y
# Plot function contours together with the evolution of
# the simplices as they approach the minimum
fig = plt.figure()
cs = plt.contour(X, Y, Z, 20)
plt.clabel(cs, inline=1, fontsize=10)
for simplex in simplices:
lines = []
for i in range(3):
for j in range(i, 3):
if j == i:
continue
plt.plot(*zip(simplex[i], simplex[j]), c='black')
plt.xlabel(r"$$x_1$$")
plt.ylabel(r"$$x_2$$")
plt.grid()
plt.savefig('no-minimum.png', bbox_inches=0, dpi=80)
|
<commit_before><commit_msg>Add example with no minimum.<commit_after>from simplex.algorithm import NelderMeadSimplex
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
rc('text', usetex=True)
# Define objective function
def objective(xs):
x1, x2 = xs[0], xs[1]
return x1*x2
# Define callback function
simplices = []
def callback(args):
simplices.append(args[0])
# Initial simplex
simplex = np.array([[0,0], [1,1], [0,1]], dtype=np.float)
# Initialise NelderMead simplex algorithm
nm = NelderMeadSimplex(objective, simplex, epsilon=1e-6, callback=callback)
# Minimise the objective function
solution = nm.solve()
print("Minimum at {}".format(solution))
# Tabulate objective function
x = np.linspace(-3, 3, 1000)
y = np.linspace(-3, 3, 1000)
X, Y = np.meshgrid(x, y)
Z = X*Y
# Plot function contours together with the evolution of
# the simplices as they approach the minimum
fig = plt.figure()
cs = plt.contour(X, Y, Z, 20)
plt.clabel(cs, inline=1, fontsize=10)
for simplex in simplices:
lines = []
for i in range(3):
for j in range(i, 3):
if j == i:
continue
plt.plot(*zip(simplex[i], simplex[j]), c='black')
plt.xlabel(r"$$x_1$$")
plt.ylabel(r"$$x_2$$")
plt.grid()
plt.savefig('no-minimum.png', bbox_inches=0, dpi=80)
|
|
4b58d8153bcf7612a2d3ab360df941089e45ed3e
|
trim.py
|
trim.py
|
"""Create a new folder of images that consist of only the cell chamber
Name of save folder is specified in commandline"""
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
# might want to add options for other arguments
assert len(sys.argv) == 2
saveFolderName = sys.argv[1]
# call a function that will return a list of numpy matrices
# create the folder
if not os.path.exists(saveFolderName):
os.makedirs(saveFolderName)
# for np matrix in list, save
pass
def return_numpy_list():
"""Return list of np matrices representing each image ROI"""
numpy_list = []
# get files from other function
# process each file, append result to list
return numpy_list
def get_file_list():
"""Return list of all .tif files in the current directory"""
file_list = []
for fname in os.listdir("./"):
if fname.endswith(".tif"):
print("Found " + fname)
file_list.append(fname)
def process_single(fname):
f = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) # Need grayscale for Hough line transform
pass
|
Add initial; most methods not implemented
|
Add initial; most methods not implemented
|
Python
|
mit
|
justinjoh/get-ROI
|
Add initial; most methods not implemented
|
"""Create a new folder of images that consist of only the cell chamber
Name of save folder is specified in commandline"""
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
# might want to add options for other arguments
assert len(sys.argv) == 2
saveFolderName = sys.argv[1]
# call a function that will return a list of numpy matrices
# create the folder
if not os.path.exists(saveFolderName):
os.makedirs(saveFolderName)
# for np matrix in list, save
pass
def return_numpy_list():
"""Return list of np matrices representing each image ROI"""
numpy_list = []
# get files from other function
# process each file, append result to list
return numpy_list
def get_file_list():
"""Return list of all .tif files in the current directory"""
file_list = []
for fname in os.listdir("./"):
if fname.endswith(".tif"):
print("Found " + fname)
file_list.append(fname)
def process_single(fname):
f = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) # Need grayscale for Hough line transform
pass
|
<commit_before><commit_msg>Add initial; most methods not implemented<commit_after>
|
"""Create a new folder of images that consist of only the cell chamber
Name of save folder is specified in commandline"""
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
# might want to add options for other arguments
assert len(sys.argv) == 2
saveFolderName = sys.argv[1]
# call a function that will return a list of numpy matrices
# create the folder
if not os.path.exists(saveFolderName):
os.makedirs(saveFolderName)
# for np matrix in list, save
pass
def return_numpy_list():
"""Return list of np matrices representing each image ROI"""
numpy_list = []
# get files from other function
# process each file, append result to list
return numpy_list
def get_file_list():
"""Return list of all .tif files in the current directory"""
file_list = []
for fname in os.listdir("./"):
if fname.endswith(".tif"):
print("Found " + fname)
file_list.append(fname)
def process_single(fname):
f = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) # Need grayscale for Hough line transform
pass
|
Add initial; most methods not implemented"""Create a new folder of images that consist of only the cell chamber
Name of save folder is specified in commandline"""
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
# might want to add options for other arguments
assert len(sys.argv) == 2
saveFolderName = sys.argv[1]
# call a function that will return a list of numpy matrices
# create the folder
if not os.path.exists(saveFolderName):
os.makedirs(saveFolderName)
# for np matrix in list, save
pass
def return_numpy_list():
"""Return list of np matrices representing each image ROI"""
numpy_list = []
# get files from other function
# process each file, append result to list
return numpy_list
def get_file_list():
"""Return list of all .tif files in the current directory"""
file_list = []
for fname in os.listdir("./"):
if fname.endswith(".tif"):
print("Found " + fname)
file_list.append(fname)
def process_single(fname):
f = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) # Need grayscale for Hough line transform
pass
|
<commit_before><commit_msg>Add initial; most methods not implemented<commit_after>"""Create a new folder of images that consist of only the cell chamber
Name of save folder is specified in commandline"""
import sys
import os
import cv2
import numpy as np
if __name__ == '__main__':
# might want to add options for other arguments
assert len(sys.argv) == 2
saveFolderName = sys.argv[1]
# call a function that will return a list of numpy matrices
# create the folder
if not os.path.exists(saveFolderName):
os.makedirs(saveFolderName)
# for np matrix in list, save
pass
def return_numpy_list():
"""Return list of np matrices representing each image ROI"""
numpy_list = []
# get files from other function
# process each file, append result to list
return numpy_list
def get_file_list():
"""Return list of all .tif files in the current directory"""
file_list = []
for fname in os.listdir("./"):
if fname.endswith(".tif"):
print("Found " + fname)
file_list.append(fname)
def process_single(fname):
f = cv2.imread(fname, cv2.IMREAD_GRAYSCALE) # Need grayscale for Hough line transform
pass
|
|
23a5417e2f870a88d88aaf0683d57cc4177f020c
|
ci/deployment-tests/app5_deploymenttest.py
|
ci/deployment-tests/app5_deploymenttest.py
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
def find_by_identifier(lst, id):
for item in lst:
if item['identifier'] == id:
return item
return None
class VerifyApp5(unittest.TestCase):
def setUp(self):
pass
def test_resource_config(self):
version = opsmgr.get_version()
# Resource config only 1.8+
if version[0] < 1 or version[1] < 8:
return
settings = opsmgr.get('/api/installation_settings').json()
products = settings['products']
product = find_by_identifier(products, 'test-tile')
jobs = product['jobs']
job = find_by_identifier(jobs, 'redis_z1')
job_resource_config = opsmgr.get(
'/api/v0/staged/products/{}/jobs/{}/resource_config'.format(
product['guid'],
job['guid'],
)
).json()
self.assertTrue('persistent_disk' in job_resource_config)
self.assertTrue('size_mb' in job_resource_config['persistent_disk'])
self.assertEqual(job_resource_config['persistent_disk']['size_mb'], '10240')
if __name__ == '__main__':
unittest.main()
|
Add deployment test to verify job resource config.
|
Add deployment test to verify job resource config.
|
Python
|
apache-2.0
|
cf-platform-eng/tile-generator,cf-platform-eng/tile-generator,cf-platform-eng/tile-generator,cf-platform-eng/tile-generator
|
Add deployment test to verify job resource config.
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
def find_by_identifier(lst, id):
for item in lst:
if item['identifier'] == id:
return item
return None
class VerifyApp5(unittest.TestCase):
def setUp(self):
pass
def test_resource_config(self):
version = opsmgr.get_version()
# Resource config only 1.8+
if version[0] < 1 or version[1] < 8:
return
settings = opsmgr.get('/api/installation_settings').json()
products = settings['products']
product = find_by_identifier(products, 'test-tile')
jobs = product['jobs']
job = find_by_identifier(jobs, 'redis_z1')
job_resource_config = opsmgr.get(
'/api/v0/staged/products/{}/jobs/{}/resource_config'.format(
product['guid'],
job['guid'],
)
).json()
self.assertTrue('persistent_disk' in job_resource_config)
self.assertTrue('size_mb' in job_resource_config['persistent_disk'])
self.assertEqual(job_resource_config['persistent_disk']['size_mb'], '10240')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add deployment test to verify job resource config.<commit_after>
|
# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
def find_by_identifier(lst, id):
for item in lst:
if item['identifier'] == id:
return item
return None
class VerifyApp5(unittest.TestCase):
def setUp(self):
pass
def test_resource_config(self):
version = opsmgr.get_version()
# Resource config only 1.8+
if version[0] < 1 or version[1] < 8:
return
settings = opsmgr.get('/api/installation_settings').json()
products = settings['products']
product = find_by_identifier(products, 'test-tile')
jobs = product['jobs']
job = find_by_identifier(jobs, 'redis_z1')
job_resource_config = opsmgr.get(
'/api/v0/staged/products/{}/jobs/{}/resource_config'.format(
product['guid'],
job['guid'],
)
).json()
self.assertTrue('persistent_disk' in job_resource_config)
self.assertTrue('size_mb' in job_resource_config['persistent_disk'])
self.assertEqual(job_resource_config['persistent_disk']['size_mb'], '10240')
if __name__ == '__main__':
unittest.main()
|
Add deployment test to verify job resource config.# tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
def find_by_identifier(lst, id):
for item in lst:
if item['identifier'] == id:
return item
return None
class VerifyApp5(unittest.TestCase):
def setUp(self):
pass
def test_resource_config(self):
version = opsmgr.get_version()
# Resource config only 1.8+
if version[0] < 1 or version[1] < 8:
return
settings = opsmgr.get('/api/installation_settings').json()
products = settings['products']
product = find_by_identifier(products, 'test-tile')
jobs = product['jobs']
job = find_by_identifier(jobs, 'redis_z1')
job_resource_config = opsmgr.get(
'/api/v0/staged/products/{}/jobs/{}/resource_config'.format(
product['guid'],
job['guid'],
)
).json()
self.assertTrue('persistent_disk' in job_resource_config)
self.assertTrue('size_mb' in job_resource_config['persistent_disk'])
self.assertEqual(job_resource_config['persistent_disk']['size_mb'], '10240')
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add deployment test to verify job resource config.<commit_after># tile-generator
#
# Copyright (c) 2015-Present Pivotal Software, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import json
import sys
import os
import requests
from tile_generator import opsmgr
def find_by_identifier(lst, id):
for item in lst:
if item['identifier'] == id:
return item
return None
class VerifyApp5(unittest.TestCase):
def setUp(self):
pass
def test_resource_config(self):
version = opsmgr.get_version()
# Resource config only 1.8+
if version[0] < 1 or version[1] < 8:
return
settings = opsmgr.get('/api/installation_settings').json()
products = settings['products']
product = find_by_identifier(products, 'test-tile')
jobs = product['jobs']
job = find_by_identifier(jobs, 'redis_z1')
job_resource_config = opsmgr.get(
'/api/v0/staged/products/{}/jobs/{}/resource_config'.format(
product['guid'],
job['guid'],
)
).json()
self.assertTrue('persistent_disk' in job_resource_config)
self.assertTrue('size_mb' in job_resource_config['persistent_disk'])
self.assertEqual(job_resource_config['persistent_disk']['size_mb'], '10240')
if __name__ == '__main__':
unittest.main()
|
|
6cd920e088d0a755644e380807db61a472a03eae
|
spacy/tests/regression/test_issue2772.py
|
spacy/tests/regression/test_issue2772.py
|
'''Test that deprojectivization doesn't mess up sentence boundaries.'''
import pytest
from ...syntax.nonproj import projectivize, deprojectivize
from ..util import get_doc
@pytest.mark.xfail
def test_issue2772(en_vocab):
words = 'When we write or communicate virtually , we can hide our true feelings .'.split()
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 1, 7, -1, -1, -1, 3, 2, 1, 0, 2, 1, -1, -1]
deps = ['dep'] * len(heads)
heads, deps = projectivize(heads, deps)
doc = get_doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
deprojectivize(doc)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
|
Add xfail test for deprojectivization SBD bug
|
Add xfail test for deprojectivization SBD bug
|
Python
|
mit
|
honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy
|
Add xfail test for deprojectivization SBD bug
|
'''Test that deprojectivization doesn't mess up sentence boundaries.'''
import pytest
from ...syntax.nonproj import projectivize, deprojectivize
from ..util import get_doc
@pytest.mark.xfail
def test_issue2772(en_vocab):
words = 'When we write or communicate virtually , we can hide our true feelings .'.split()
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 1, 7, -1, -1, -1, 3, 2, 1, 0, 2, 1, -1, -1]
deps = ['dep'] * len(heads)
heads, deps = projectivize(heads, deps)
doc = get_doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
deprojectivize(doc)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
|
<commit_before><commit_msg>Add xfail test for deprojectivization SBD bug<commit_after>
|
'''Test that deprojectivization doesn't mess up sentence boundaries.'''
import pytest
from ...syntax.nonproj import projectivize, deprojectivize
from ..util import get_doc
@pytest.mark.xfail
def test_issue2772(en_vocab):
words = 'When we write or communicate virtually , we can hide our true feelings .'.split()
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 1, 7, -1, -1, -1, 3, 2, 1, 0, 2, 1, -1, -1]
deps = ['dep'] * len(heads)
heads, deps = projectivize(heads, deps)
doc = get_doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
deprojectivize(doc)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
|
Add xfail test for deprojectivization SBD bug'''Test that deprojectivization doesn't mess up sentence boundaries.'''
import pytest
from ...syntax.nonproj import projectivize, deprojectivize
from ..util import get_doc
@pytest.mark.xfail
def test_issue2772(en_vocab):
words = 'When we write or communicate virtually , we can hide our true feelings .'.split()
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 1, 7, -1, -1, -1, 3, 2, 1, 0, 2, 1, -1, -1]
deps = ['dep'] * len(heads)
heads, deps = projectivize(heads, deps)
doc = get_doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
deprojectivize(doc)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
|
<commit_before><commit_msg>Add xfail test for deprojectivization SBD bug<commit_after>'''Test that deprojectivization doesn't mess up sentence boundaries.'''
import pytest
from ...syntax.nonproj import projectivize, deprojectivize
from ..util import get_doc
@pytest.mark.xfail
def test_issue2772(en_vocab):
words = 'When we write or communicate virtually , we can hide our true feelings .'.split()
# A tree with a non-projective (i.e. crossing) arc
# The arcs (0, 4) and (2, 9) cross.
heads = [4, 1, 7, -1, -1, -1, 3, 2, 1, 0, 2, 1, -1, -1]
deps = ['dep'] * len(heads)
heads, deps = projectivize(heads, deps)
doc = get_doc(en_vocab, words=words, heads=heads, deps=deps)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
deprojectivize(doc)
assert doc[0].is_sent_start == True
assert doc[1].is_sent_start is None
|
|
2e570988c4be84a6bdbe7bc252feb553d59c4ef2
|
wsgi.py
|
wsgi.py
|
import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, 'demonstrare'))
config = os.path.join(here, 'production.ini')
from pyramid.paster import get_app
application = get_app(config, 'main')
|
Create WSGI file for deployment
|
Create WSGI file for deployment
|
Python
|
mit
|
josuemontano/pyramid-angularjs-starter,josuemontano/api-starter,josuemontano/pyramid-angularjs-starter,josuemontano/API-platform,josuemontano/API-platform,josuemontano/API-platform,josuemontano/api-starter,josuemontano/api-starter,josuemontano/pyramid-angularjs-starter,josuemontano/API-platform
|
Create WSGI file for deployment
|
import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, 'demonstrare'))
config = os.path.join(here, 'production.ini')
from pyramid.paster import get_app
application = get_app(config, 'main')
|
<commit_before><commit_msg>Create WSGI file for deployment<commit_after>
|
import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, 'demonstrare'))
config = os.path.join(here, 'production.ini')
from pyramid.paster import get_app
application = get_app(config, 'main')
|
Create WSGI file for deploymentimport os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, 'demonstrare'))
config = os.path.join(here, 'production.ini')
from pyramid.paster import get_app
application = get_app(config, 'main')
|
<commit_before><commit_msg>Create WSGI file for deployment<commit_after>import os
import sys
here = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, os.path.join(here, 'demonstrare'))
config = os.path.join(here, 'production.ini')
from pyramid.paster import get_app
application = get_app(config, 'main')
|
|
4b2d23abbb5ef3267eae2b53bf70dfa9c62c868b
|
tests/test/xie/graphics/drawing.py
|
tests/test/xie/graphics/drawing.py
|
import unittest
from xie.graphics.drawing import DrawingSystem
from xie.graphics.canvas import EncodedTextCanvasController
from xie.graphics.factory import ShapeFactory
class DrawingSystemTestCase(unittest.TestCase):
def setUp(self):
self.controller = EncodedTextCanvasController()
self.ds = DrawingSystem(self.controller)
self.shapeFactory = ShapeFactory()
def tearDown(self):
pass
def test_draw_stroke_1(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.draw(stroke)
self.assertEqual("0.20.123,1.242.123", self.controller.getStrokeExpression())
def test_draw_stroke_2(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎", [211], startPoint=(124, 27))
self.ds.draw(stroke)
self.assertEqual("0.124.27,1.124.238", self.controller.getStrokeExpression())
def test_draw_stroke_3(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎彎", [146, 126, 32], startPoint=(43, 54))
self.ds.draw(stroke)
self.assertEqual("0.43.54,1.43.180,2.43.212,1.75.212,1.221.212", self.controller.getStrokeExpression())
def test_translate(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(29, 105)
self.ds.draw(stroke)
self.assertEqual("0.49.228,1.271.228", self.controller.getStrokeExpression())
def test_scale(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.scale(0.5, 1.2)
self.ds.draw(stroke)
self.assertEqual("0.10.148,1.121.148", self.controller.getStrokeExpression())
def test_complex_transform(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(-10, -110)
self.ds.scale(0.5, 1.2)
self.ds.translate(26, 80)
self.ds.draw(stroke)
self.assertEqual("0.31.96,1.142.96", self.controller.getStrokeExpression())
|
Add test cases for DrawingSystem
|
[Test] Add test cases for DrawingSystem
|
Python
|
apache-2.0
|
xrloong/Xie
|
[Test] Add test cases for DrawingSystem
|
import unittest
from xie.graphics.drawing import DrawingSystem
from xie.graphics.canvas import EncodedTextCanvasController
from xie.graphics.factory import ShapeFactory
class DrawingSystemTestCase(unittest.TestCase):
def setUp(self):
self.controller = EncodedTextCanvasController()
self.ds = DrawingSystem(self.controller)
self.shapeFactory = ShapeFactory()
def tearDown(self):
pass
def test_draw_stroke_1(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.draw(stroke)
self.assertEqual("0.20.123,1.242.123", self.controller.getStrokeExpression())
def test_draw_stroke_2(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎", [211], startPoint=(124, 27))
self.ds.draw(stroke)
self.assertEqual("0.124.27,1.124.238", self.controller.getStrokeExpression())
def test_draw_stroke_3(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎彎", [146, 126, 32], startPoint=(43, 54))
self.ds.draw(stroke)
self.assertEqual("0.43.54,1.43.180,2.43.212,1.75.212,1.221.212", self.controller.getStrokeExpression())
def test_translate(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(29, 105)
self.ds.draw(stroke)
self.assertEqual("0.49.228,1.271.228", self.controller.getStrokeExpression())
def test_scale(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.scale(0.5, 1.2)
self.ds.draw(stroke)
self.assertEqual("0.10.148,1.121.148", self.controller.getStrokeExpression())
def test_complex_transform(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(-10, -110)
self.ds.scale(0.5, 1.2)
self.ds.translate(26, 80)
self.ds.draw(stroke)
self.assertEqual("0.31.96,1.142.96", self.controller.getStrokeExpression())
|
<commit_before><commit_msg>[Test] Add test cases for DrawingSystem<commit_after>
|
import unittest
from xie.graphics.drawing import DrawingSystem
from xie.graphics.canvas import EncodedTextCanvasController
from xie.graphics.factory import ShapeFactory
class DrawingSystemTestCase(unittest.TestCase):
def setUp(self):
self.controller = EncodedTextCanvasController()
self.ds = DrawingSystem(self.controller)
self.shapeFactory = ShapeFactory()
def tearDown(self):
pass
def test_draw_stroke_1(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.draw(stroke)
self.assertEqual("0.20.123,1.242.123", self.controller.getStrokeExpression())
def test_draw_stroke_2(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎", [211], startPoint=(124, 27))
self.ds.draw(stroke)
self.assertEqual("0.124.27,1.124.238", self.controller.getStrokeExpression())
def test_draw_stroke_3(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎彎", [146, 126, 32], startPoint=(43, 54))
self.ds.draw(stroke)
self.assertEqual("0.43.54,1.43.180,2.43.212,1.75.212,1.221.212", self.controller.getStrokeExpression())
def test_translate(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(29, 105)
self.ds.draw(stroke)
self.assertEqual("0.49.228,1.271.228", self.controller.getStrokeExpression())
def test_scale(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.scale(0.5, 1.2)
self.ds.draw(stroke)
self.assertEqual("0.10.148,1.121.148", self.controller.getStrokeExpression())
def test_complex_transform(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(-10, -110)
self.ds.scale(0.5, 1.2)
self.ds.translate(26, 80)
self.ds.draw(stroke)
self.assertEqual("0.31.96,1.142.96", self.controller.getStrokeExpression())
|
[Test] Add test cases for DrawingSystemimport unittest
from xie.graphics.drawing import DrawingSystem
from xie.graphics.canvas import EncodedTextCanvasController
from xie.graphics.factory import ShapeFactory
class DrawingSystemTestCase(unittest.TestCase):
def setUp(self):
self.controller = EncodedTextCanvasController()
self.ds = DrawingSystem(self.controller)
self.shapeFactory = ShapeFactory()
def tearDown(self):
pass
def test_draw_stroke_1(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.draw(stroke)
self.assertEqual("0.20.123,1.242.123", self.controller.getStrokeExpression())
def test_draw_stroke_2(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎", [211], startPoint=(124, 27))
self.ds.draw(stroke)
self.assertEqual("0.124.27,1.124.238", self.controller.getStrokeExpression())
def test_draw_stroke_3(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎彎", [146, 126, 32], startPoint=(43, 54))
self.ds.draw(stroke)
self.assertEqual("0.43.54,1.43.180,2.43.212,1.75.212,1.221.212", self.controller.getStrokeExpression())
def test_translate(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(29, 105)
self.ds.draw(stroke)
self.assertEqual("0.49.228,1.271.228", self.controller.getStrokeExpression())
def test_scale(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.scale(0.5, 1.2)
self.ds.draw(stroke)
self.assertEqual("0.10.148,1.121.148", self.controller.getStrokeExpression())
def test_complex_transform(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(-10, -110)
self.ds.scale(0.5, 1.2)
self.ds.translate(26, 80)
self.ds.draw(stroke)
self.assertEqual("0.31.96,1.142.96", self.controller.getStrokeExpression())
|
<commit_before><commit_msg>[Test] Add test cases for DrawingSystem<commit_after>import unittest
from xie.graphics.drawing import DrawingSystem
from xie.graphics.canvas import EncodedTextCanvasController
from xie.graphics.factory import ShapeFactory
class DrawingSystemTestCase(unittest.TestCase):
def setUp(self):
self.controller = EncodedTextCanvasController()
self.ds = DrawingSystem(self.controller)
self.shapeFactory = ShapeFactory()
def tearDown(self):
pass
def test_draw_stroke_1(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.draw(stroke)
self.assertEqual("0.20.123,1.242.123", self.controller.getStrokeExpression())
def test_draw_stroke_2(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎", [211], startPoint=(124, 27))
self.ds.draw(stroke)
self.assertEqual("0.124.27,1.124.238", self.controller.getStrokeExpression())
def test_draw_stroke_3(self):
stroke = self.shapeFactory.generateStrokeByParameters("豎彎", [146, 126, 32], startPoint=(43, 54))
self.ds.draw(stroke)
self.assertEqual("0.43.54,1.43.180,2.43.212,1.75.212,1.221.212", self.controller.getStrokeExpression())
def test_translate(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(29, 105)
self.ds.draw(stroke)
self.assertEqual("0.49.228,1.271.228", self.controller.getStrokeExpression())
def test_scale(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.scale(0.5, 1.2)
self.ds.draw(stroke)
self.assertEqual("0.10.148,1.121.148", self.controller.getStrokeExpression())
def test_complex_transform(self):
stroke = self.shapeFactory.generateStrokeByParameters("橫", [222], startPoint=(20, 123))
self.ds.translate(-10, -110)
self.ds.scale(0.5, 1.2)
self.ds.translate(26, 80)
self.ds.draw(stroke)
self.assertEqual("0.31.96,1.142.96", self.controller.getStrokeExpression())
|
|
b61ac879fb2869acf84bb30386b08789e618aed0
|
utilities/make_agasc_supplement.py
|
utilities/make_agasc_supplement.py
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create the initial version of agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
This script simply creates the initial file that has only bad
stars from two sources:
- starcheck bad star list
https://github.com/sot/starcheck/blob/master/starcheck/data/agasc.bad
- GAIA high proper motion file $SKA/analysis/gaia/agasc_gaia_xmatch_PM_gt_50mas.fits.gz
See: https://nbviewer.jupyter.org/url/cxc.cfa.harvard.edu/mta/ASPECT/ipynb/star_selection/gaia
GAIA guide star crossmatch.ipynb
"""
import os
from pathlib import Path
import numpy as np
from astropy.table import Table
HOME = Path(os.environ['HOME'])
SKA = Path(os.environ['SKA'])
agasc_ids = []
sources = []
# Starcheck bad star list is not installed anywhere so just grab from local git repo
lines = open(HOME / 'git' / 'starcheck' / 'starcheck' / 'data' / 'agasc.bad', 'r').readlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
agasc_ids.append(line.split()[0])
sources.append(1) # source=1 implies this is from the starcheck agasc.bad file
# GAIA
dat = Table.read(SKA / 'analysis' / 'gaia' / 'agasc_gaia_xmatch_PM_gt_50mas.fits.gz')
agasc_ids.extend(dat['AGASC_ID'].tolist())
sources.extend([2] * len(dat))
agasc_ids = np.array(agasc_ids, dtype=np.int32)
sources = np.array(sources, dtype=np.int16)
out = Table([agasc_ids, sources], names=['agasc_id', 'source'])
out.write('agasc_supplement.h5', format='hdf5', path='bads')
|
Add utility script to create initial agasc_supplement.h5
|
Add utility script to create initial agasc_supplement.h5
|
Python
|
bsd-3-clause
|
sot/mica,sot/mica
|
Add utility script to create initial agasc_supplement.h5
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create the initial version of agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
This script simply creates the initial file that has only bad
stars from two sources:
- starcheck bad star list
https://github.com/sot/starcheck/blob/master/starcheck/data/agasc.bad
- GAIA high proper motion file $SKA/analysis/gaia/agasc_gaia_xmatch_PM_gt_50mas.fits.gz
See: https://nbviewer.jupyter.org/url/cxc.cfa.harvard.edu/mta/ASPECT/ipynb/star_selection/gaia
GAIA guide star crossmatch.ipynb
"""
import os
from pathlib import Path
import numpy as np
from astropy.table import Table
HOME = Path(os.environ['HOME'])
SKA = Path(os.environ['SKA'])
agasc_ids = []
sources = []
# Starcheck bad star list is not installed anywhere so just grab from local git repo
lines = open(HOME / 'git' / 'starcheck' / 'starcheck' / 'data' / 'agasc.bad', 'r').readlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
agasc_ids.append(line.split()[0])
sources.append(1) # source=1 implies this is from the starcheck agasc.bad file
# GAIA
dat = Table.read(SKA / 'analysis' / 'gaia' / 'agasc_gaia_xmatch_PM_gt_50mas.fits.gz')
agasc_ids.extend(dat['AGASC_ID'].tolist())
sources.extend([2] * len(dat))
agasc_ids = np.array(agasc_ids, dtype=np.int32)
sources = np.array(sources, dtype=np.int16)
out = Table([agasc_ids, sources], names=['agasc_id', 'source'])
out.write('agasc_supplement.h5', format='hdf5', path='bads')
|
<commit_before><commit_msg>Add utility script to create initial agasc_supplement.h5<commit_after>
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create the initial version of agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
This script simply creates the initial file that has only bad
stars from two sources:
- starcheck bad star list
https://github.com/sot/starcheck/blob/master/starcheck/data/agasc.bad
- GAIA high proper motion file $SKA/analysis/gaia/agasc_gaia_xmatch_PM_gt_50mas.fits.gz
See: https://nbviewer.jupyter.org/url/cxc.cfa.harvard.edu/mta/ASPECT/ipynb/star_selection/gaia
GAIA guide star crossmatch.ipynb
"""
import os
from pathlib import Path
import numpy as np
from astropy.table import Table
HOME = Path(os.environ['HOME'])
SKA = Path(os.environ['SKA'])
agasc_ids = []
sources = []
# Starcheck bad star list is not installed anywhere so just grab from local git repo
lines = open(HOME / 'git' / 'starcheck' / 'starcheck' / 'data' / 'agasc.bad', 'r').readlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
agasc_ids.append(line.split()[0])
sources.append(1) # source=1 implies this is from the starcheck agasc.bad file
# GAIA
dat = Table.read(SKA / 'analysis' / 'gaia' / 'agasc_gaia_xmatch_PM_gt_50mas.fits.gz')
agasc_ids.extend(dat['AGASC_ID'].tolist())
sources.extend([2] * len(dat))
agasc_ids = np.array(agasc_ids, dtype=np.int32)
sources = np.array(sources, dtype=np.int16)
out = Table([agasc_ids, sources], names=['agasc_id', 'source'])
out.write('agasc_supplement.h5', format='hdf5', path='bads')
|
Add utility script to create initial agasc_supplement.h5# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create the initial version of agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
This script simply creates the initial file that has only bad
stars from two sources:
- starcheck bad star list
https://github.com/sot/starcheck/blob/master/starcheck/data/agasc.bad
- GAIA high proper motion file $SKA/analysis/gaia/agasc_gaia_xmatch_PM_gt_50mas.fits.gz
See: https://nbviewer.jupyter.org/url/cxc.cfa.harvard.edu/mta/ASPECT/ipynb/star_selection/gaia
GAIA guide star crossmatch.ipynb
"""
import os
from pathlib import Path
import numpy as np
from astropy.table import Table
HOME = Path(os.environ['HOME'])
SKA = Path(os.environ['SKA'])
agasc_ids = []
sources = []
# Starcheck bad star list is not installed anywhere so just grab from local git repo
lines = open(HOME / 'git' / 'starcheck' / 'starcheck' / 'data' / 'agasc.bad', 'r').readlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
agasc_ids.append(line.split()[0])
sources.append(1) # source=1 implies this is from the starcheck agasc.bad file
# GAIA
dat = Table.read(SKA / 'analysis' / 'gaia' / 'agasc_gaia_xmatch_PM_gt_50mas.fits.gz')
agasc_ids.extend(dat['AGASC_ID'].tolist())
sources.extend([2] * len(dat))
agasc_ids = np.array(agasc_ids, dtype=np.int32)
sources = np.array(sources, dtype=np.int16)
out = Table([agasc_ids, sources], names=['agasc_id', 'source'])
out.write('agasc_supplement.h5', format='hdf5', path='bads')
|
<commit_before><commit_msg>Add utility script to create initial agasc_supplement.h5<commit_after># Licensed under a 3-clause BSD style license - see LICENSE.rst
"""
Create the initial version of agasc_supplement.h5.
This file is a supplement to the stable AGASC to inform star selection
and star catalog checking.
This script simply creates the initial file that has only bad
stars from two sources:
- starcheck bad star list
https://github.com/sot/starcheck/blob/master/starcheck/data/agasc.bad
- GAIA high proper motion file $SKA/analysis/gaia/agasc_gaia_xmatch_PM_gt_50mas.fits.gz
See: https://nbviewer.jupyter.org/url/cxc.cfa.harvard.edu/mta/ASPECT/ipynb/star_selection/gaia
GAIA guide star crossmatch.ipynb
"""
import os
from pathlib import Path
import numpy as np
from astropy.table import Table
HOME = Path(os.environ['HOME'])
SKA = Path(os.environ['SKA'])
agasc_ids = []
sources = []
# Starcheck bad star list is not installed anywhere so just grab from local git repo
lines = open(HOME / 'git' / 'starcheck' / 'starcheck' / 'data' / 'agasc.bad', 'r').readlines()
for line in lines:
line = line.strip()
if line.startswith('#'):
continue
agasc_ids.append(line.split()[0])
sources.append(1) # source=1 implies this is from the starcheck agasc.bad file
# GAIA
dat = Table.read(SKA / 'analysis' / 'gaia' / 'agasc_gaia_xmatch_PM_gt_50mas.fits.gz')
agasc_ids.extend(dat['AGASC_ID'].tolist())
sources.extend([2] * len(dat))
agasc_ids = np.array(agasc_ids, dtype=np.int32)
sources = np.array(sources, dtype=np.int16)
out = Table([agasc_ids, sources], names=['agasc_id', 'source'])
out.write('agasc_supplement.h5', format='hdf5', path='bads')
|
|
01dbfc5617c094913832302383410f19a2cde088
|
toggle_cap_letters.py
|
toggle_cap_letters.py
|
import sublime, sublime_plugin
import re
def toggle(pattern, word, transformer):
for match in pattern.finditer(word):
substr = match.group()
word = word.replace(substr, transformer(substr))
return word
def mixed_to_underscore(word):
return '_' + word.lower()
def underscore_to_mixed(word):
return word.replace('_','').capitalize()
def is_letter_type(pattern, word):
return bool(pattern.search(word))
class ToggleCapLettersCommand(sublime_plugin.TextCommand):
''' This plugin transforms the selected words from
setVarName -> set_var_name or
set_var_name -> setVarName
'''
mixed_cap_letters = re.compile("[A-Z][a-z]+")
underscore_letters = re.compile("[_][a-z]+")
def run(self, edit, *args):
for point in self.view.sel():
word_region = self.view.word(point)
word = self.view.substr(word_region)
new_word = ''
if is_letter_type(self.mixed_cap_letters, word):
new_word = toggle(self.mixed_cap_letters, word, mixed_to_underscore)
elif is_letter_type(self.underscore_letters, word):
new_word = toggle(self.underscore_letters, word, underscore_to_mixed)
if new_word:
self.view.erase(edit, word_region)
self.view.insert(edit, word_region.begin(), new_word)
|
Add a plugin that toggles mixed cap letters to underscore styles.
|
Add a plugin that toggles mixed cap letters to underscore styles.
|
Python
|
mit
|
shaochuan/sublime-plugins
|
Add a plugin that toggles mixed cap letters to underscore styles.
|
import sublime, sublime_plugin
import re
def toggle(pattern, word, transformer):
for match in pattern.finditer(word):
substr = match.group()
word = word.replace(substr, transformer(substr))
return word
def mixed_to_underscore(word):
return '_' + word.lower()
def underscore_to_mixed(word):
return word.replace('_','').capitalize()
def is_letter_type(pattern, word):
return bool(pattern.search(word))
class ToggleCapLettersCommand(sublime_plugin.TextCommand):
''' This plugin transforms the selected words from
setVarName -> set_var_name or
set_var_name -> setVarName
'''
mixed_cap_letters = re.compile("[A-Z][a-z]+")
underscore_letters = re.compile("[_][a-z]+")
def run(self, edit, *args):
for point in self.view.sel():
word_region = self.view.word(point)
word = self.view.substr(word_region)
new_word = ''
if is_letter_type(self.mixed_cap_letters, word):
new_word = toggle(self.mixed_cap_letters, word, mixed_to_underscore)
elif is_letter_type(self.underscore_letters, word):
new_word = toggle(self.underscore_letters, word, underscore_to_mixed)
if new_word:
self.view.erase(edit, word_region)
self.view.insert(edit, word_region.begin(), new_word)
|
<commit_before><commit_msg>Add a plugin that toggles mixed cap letters to underscore styles.<commit_after>
|
import sublime, sublime_plugin
import re
def toggle(pattern, word, transformer):
for match in pattern.finditer(word):
substr = match.group()
word = word.replace(substr, transformer(substr))
return word
def mixed_to_underscore(word):
return '_' + word.lower()
def underscore_to_mixed(word):
return word.replace('_','').capitalize()
def is_letter_type(pattern, word):
return bool(pattern.search(word))
class ToggleCapLettersCommand(sublime_plugin.TextCommand):
''' This plugin transforms the selected words from
setVarName -> set_var_name or
set_var_name -> setVarName
'''
mixed_cap_letters = re.compile("[A-Z][a-z]+")
underscore_letters = re.compile("[_][a-z]+")
def run(self, edit, *args):
for point in self.view.sel():
word_region = self.view.word(point)
word = self.view.substr(word_region)
new_word = ''
if is_letter_type(self.mixed_cap_letters, word):
new_word = toggle(self.mixed_cap_letters, word, mixed_to_underscore)
elif is_letter_type(self.underscore_letters, word):
new_word = toggle(self.underscore_letters, word, underscore_to_mixed)
if new_word:
self.view.erase(edit, word_region)
self.view.insert(edit, word_region.begin(), new_word)
|
Add a plugin that toggles mixed cap letters to underscore styles.import sublime, sublime_plugin
import re
def toggle(pattern, word, transformer):
for match in pattern.finditer(word):
substr = match.group()
word = word.replace(substr, transformer(substr))
return word
def mixed_to_underscore(word):
return '_' + word.lower()
def underscore_to_mixed(word):
return word.replace('_','').capitalize()
def is_letter_type(pattern, word):
return bool(pattern.search(word))
class ToggleCapLettersCommand(sublime_plugin.TextCommand):
''' This plugin transforms the selected words from
setVarName -> set_var_name or
set_var_name -> setVarName
'''
mixed_cap_letters = re.compile("[A-Z][a-z]+")
underscore_letters = re.compile("[_][a-z]+")
def run(self, edit, *args):
for point in self.view.sel():
word_region = self.view.word(point)
word = self.view.substr(word_region)
new_word = ''
if is_letter_type(self.mixed_cap_letters, word):
new_word = toggle(self.mixed_cap_letters, word, mixed_to_underscore)
elif is_letter_type(self.underscore_letters, word):
new_word = toggle(self.underscore_letters, word, underscore_to_mixed)
if new_word:
self.view.erase(edit, word_region)
self.view.insert(edit, word_region.begin(), new_word)
|
<commit_before><commit_msg>Add a plugin that toggles mixed cap letters to underscore styles.<commit_after>import sublime, sublime_plugin
import re
def toggle(pattern, word, transformer):
for match in pattern.finditer(word):
substr = match.group()
word = word.replace(substr, transformer(substr))
return word
def mixed_to_underscore(word):
return '_' + word.lower()
def underscore_to_mixed(word):
return word.replace('_','').capitalize()
def is_letter_type(pattern, word):
return bool(pattern.search(word))
class ToggleCapLettersCommand(sublime_plugin.TextCommand):
''' This plugin transforms the selected words from
setVarName -> set_var_name or
set_var_name -> setVarName
'''
mixed_cap_letters = re.compile("[A-Z][a-z]+")
underscore_letters = re.compile("[_][a-z]+")
def run(self, edit, *args):
for point in self.view.sel():
word_region = self.view.word(point)
word = self.view.substr(word_region)
new_word = ''
if is_letter_type(self.mixed_cap_letters, word):
new_word = toggle(self.mixed_cap_letters, word, mixed_to_underscore)
elif is_letter_type(self.underscore_letters, word):
new_word = toggle(self.underscore_letters, word, underscore_to_mixed)
if new_word:
self.view.erase(edit, word_region)
self.view.insert(edit, word_region.begin(), new_word)
|
|
644de5b5ed459e38cd073ec35943154cfe204e4f
|
tools/run_coverage.py
|
tools/run_coverage.py
|
#!/usr/bin/env python
"""Generate coverage reports"""
import os
print('Running code coverage. This will take a minute or two to run the tests.')
os.system("coverage run --rcfile=.coveragerc manage.py test -v1")
print('Tests completed.')
print('Generating code coverage report')
os.system("coverage report")
print('Generating html report of code coverage')
os.system("coverage html")
print('html report completed. See "oh-mainline/coverage_html_report/index.html"')
|
Add tool to run coverage and reports
|
Add tool to run coverage and reports
|
Python
|
agpl-3.0
|
sudheesh001/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,onceuponatimeforever/oh-mainline,openhatch/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,waseem18/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,willingc/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,vipul-sharma20/oh-mainline,Changaco/oh-mainline,willingc/oh-mainline,heeraj123/oh-mainline,Changaco/oh-mainline,campbe13/openhatch,moijes12/oh-mainline,onceuponatimeforever/oh-mainline,vipul-sharma20/oh-mainline,willingc/oh-mainline,vipul-sharma20/oh-mainline,ehashman/oh-mainline,openhatch/oh-mainline,ehashman/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,ojengwa/oh-mainline,waseem18/oh-mainline,SnappleCap/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,nirmeshk/oh-mainline,eeshangarg/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,willingc/oh-mainline,eeshangarg/oh-mainline,sudheesh001/oh-mainline,ojengwa/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,onceuponatimeforever/oh-mainline,openhatch/oh-mainline,onceuponatimeforever/oh-mainline,waseem18/oh-mainline,nirmeshk/oh-mainline,Changaco/oh-mainline,eeshangarg/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,willingc/oh-mainline,sudheesh001/oh-mainline,campbe13/openhatch,sudheesh001/oh-mainline,SnappleCap/oh-mainline,nirmeshk/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,heeraj123/oh-mainline,moijes12/oh-mainline,eeshangarg/oh-mainline,waseem18/oh-mainline,onceuponatimeforever/oh-mainline,SnappleCap/oh-mainline,SnappleCap/oh-mainline,nirmeshk/oh-mainline,waseem18/oh-mainline,Changaco/oh-mainline,heeraj123/oh-mainline
|
Add tool to run coverage and reports
|
#!/usr/bin/env python
"""Generate coverage reports"""
import os
print('Running code coverage. This will take a minute or two to run the tests.')
os.system("coverage run --rcfile=.coveragerc manage.py test -v1")
print('Tests completed.')
print('Generating code coverage report')
os.system("coverage report")
print('Generating html report of code coverage')
os.system("coverage html")
print('html report completed. See "oh-mainline/coverage_html_report/index.html"')
|
<commit_before><commit_msg>Add tool to run coverage and reports<commit_after>
|
#!/usr/bin/env python
"""Generate coverage reports"""
import os
print('Running code coverage. This will take a minute or two to run the tests.')
os.system("coverage run --rcfile=.coveragerc manage.py test -v1")
print('Tests completed.')
print('Generating code coverage report')
os.system("coverage report")
print('Generating html report of code coverage')
os.system("coverage html")
print('html report completed. See "oh-mainline/coverage_html_report/index.html"')
|
Add tool to run coverage and reports#!/usr/bin/env python
"""Generate coverage reports"""
import os
print('Running code coverage. This will take a minute or two to run the tests.')
os.system("coverage run --rcfile=.coveragerc manage.py test -v1")
print('Tests completed.')
print('Generating code coverage report')
os.system("coverage report")
print('Generating html report of code coverage')
os.system("coverage html")
print('html report completed. See "oh-mainline/coverage_html_report/index.html"')
|
<commit_before><commit_msg>Add tool to run coverage and reports<commit_after>#!/usr/bin/env python
"""Generate coverage reports"""
import os
print('Running code coverage. This will take a minute or two to run the tests.')
os.system("coverage run --rcfile=.coveragerc manage.py test -v1")
print('Tests completed.')
print('Generating code coverage report')
os.system("coverage report")
print('Generating html report of code coverage')
os.system("coverage html")
print('html report completed. See "oh-mainline/coverage_html_report/index.html"')
|
|
97c25703904a0f2508238d4268259692f9e7a665
|
test/integration/ggrc/converters/test_import_automappings.py
|
test/integration/ggrc/converters/test_import_automappings.py
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.models import Relationship
from ggrc.converters import errors
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
Clean up import auto mappings tests
|
Clean up import auto mappings tests
|
Python
|
apache-2.0
|
edofic/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,kr41/ggrc-core,edofic/ggrc-core,selahssea/ggrc-core,plamut/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,AleksNeStu/ggrc-core,andrei-karalionak/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,andrei-karalionak/ggrc-core,andrei-karalionak/ggrc-core,josthkko/ggrc-core,kr41/ggrc-core,selahssea/ggrc-core,j0gurt/ggrc-core,kr41/ggrc-core,j0gurt/ggrc-core,AleksNeStu/ggrc-core,edofic/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,plamut/ggrc-core,AleksNeStu/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.models import Relationship
from ggrc.converters import errors
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
Clean up import auto mappings tests
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.models import Relationship
from ggrc.converters import errors
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
<commit_msg>Clean up import auto mappings tests<commit_after>
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.models import Relationship
from ggrc.converters import errors
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
Clean up import auto mappings tests# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
<commit_before># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from ggrc.models import Relationship
from ggrc.converters import errors
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
<commit_msg>Clean up import auto mappings tests<commit_after># Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
from integration.ggrc.converters import TestCase
from integration.ggrc.generator import ObjectGenerator
class TestBasicCsvImport(TestCase):
def setUp(self):
TestCase.setUp(self)
self.generator = ObjectGenerator()
self.client.get("/login")
def test_basic_automappings(self):
filename = "automappings.csv"
response = self.import_file(filename)
data = [{
"object_name": "Program",
"filters": {
"expression": {
"left": "title",
"op": {"name": "="},
"right": "program 1",
},
},
"fields": "all",
}]
response = self.export_csv(data)
for i in range(1, 8):
self.assertIn("reg-{}".format(i), response.data)
self.assertIn("control-{}".format(i), response.data)
|
27f162e8eafbc456c63043bd48bf6f09cc6ab318
|
igcollect/pf_labels.py
|
igcollect/pf_labels.py
|
#!/usr/bin/env python
"""igcollect - FreeBSD Packet Filter
Copyright (c) 2018 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser
from socket import gethostname
from subprocess import check_output
import re
import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='network')
return parser.parse_args()
def parse_pf_labels():
# Get pfctl result of "show all labels"
pfctl_result = check_output(['/sbin/pfctl', '-q', '-sl'])
label_counters = {}
# Read all lines
for line in pfctl_result.splitlines():
# Split each line by ' ', this gives us the label name and values
line_tab = line.split(' ')
# Cut unnecessary things out of label
label = line_tab[0].split(':')[0]
label = re.sub('_pub$', '', label)
label = re.sub('_loc$', '', label)
if label not in label_counters:
label_counters[label] = {}
label_counters[label]['p_in'] = int(line_tab[4])
label_counters[label]['b_in'] = int(line_tab[5])
label_counters[label]['p_out'] = int(line_tab[6])
label_counters[label]['b_out'] = int(line_tab[7])
else:
label_counters[label]['p_in'] += int(line_tab[4])
label_counters[label]['b_in'] += int(line_tab[5])
label_counters[label]['p_out'] += int(line_tab[6])
label_counters[label]['b_out'] += int(line_tab[7])
return label_counters
def main():
args = parse_args()
hostname = gethostname().replace('.', '_')
now = str(int(time.time()))
label_counters = parse_pf_labels()
for label in label_counters:
for key in (
('bytesIn', 'b_in'),
('bytesOut', 'b_out'),
('pktsIn', 'p_in'),
('pktsOut', 'p_out'),
):
print('{}.{}.{}.{} {} {}'.format(
args.prefix,
label, hostname, key[0],
label_counters[label][key[1]],
now,
))
if __name__ == '__main__':
main()
|
Add script for getting pf label counters
|
Add script for getting pf label counters
|
Python
|
mit
|
innogames/igcollect
|
Add script for getting pf label counters
|
#!/usr/bin/env python
"""igcollect - FreeBSD Packet Filter
Copyright (c) 2018 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser
from socket import gethostname
from subprocess import check_output
import re
import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='network')
return parser.parse_args()
def parse_pf_labels():
# Get pfctl result of "show all labels"
pfctl_result = check_output(['/sbin/pfctl', '-q', '-sl'])
label_counters = {}
# Read all lines
for line in pfctl_result.splitlines():
# Split each line by ' ', this gives us the label name and values
line_tab = line.split(' ')
# Cut unnecessary things out of label
label = line_tab[0].split(':')[0]
label = re.sub('_pub$', '', label)
label = re.sub('_loc$', '', label)
if label not in label_counters:
label_counters[label] = {}
label_counters[label]['p_in'] = int(line_tab[4])
label_counters[label]['b_in'] = int(line_tab[5])
label_counters[label]['p_out'] = int(line_tab[6])
label_counters[label]['b_out'] = int(line_tab[7])
else:
label_counters[label]['p_in'] += int(line_tab[4])
label_counters[label]['b_in'] += int(line_tab[5])
label_counters[label]['p_out'] += int(line_tab[6])
label_counters[label]['b_out'] += int(line_tab[7])
return label_counters
def main():
args = parse_args()
hostname = gethostname().replace('.', '_')
now = str(int(time.time()))
label_counters = parse_pf_labels()
for label in label_counters:
for key in (
('bytesIn', 'b_in'),
('bytesOut', 'b_out'),
('pktsIn', 'p_in'),
('pktsOut', 'p_out'),
):
print('{}.{}.{}.{} {} {}'.format(
args.prefix,
label, hostname, key[0],
label_counters[label][key[1]],
now,
))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for getting pf label counters<commit_after>
|
#!/usr/bin/env python
"""igcollect - FreeBSD Packet Filter
Copyright (c) 2018 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser
from socket import gethostname
from subprocess import check_output
import re
import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='network')
return parser.parse_args()
def parse_pf_labels():
# Get pfctl result of "show all labels"
pfctl_result = check_output(['/sbin/pfctl', '-q', '-sl'])
label_counters = {}
# Read all lines
for line in pfctl_result.splitlines():
# Split each line by ' ', this gives us the label name and values
line_tab = line.split(' ')
# Cut unnecessary things out of label
label = line_tab[0].split(':')[0]
label = re.sub('_pub$', '', label)
label = re.sub('_loc$', '', label)
if label not in label_counters:
label_counters[label] = {}
label_counters[label]['p_in'] = int(line_tab[4])
label_counters[label]['b_in'] = int(line_tab[5])
label_counters[label]['p_out'] = int(line_tab[6])
label_counters[label]['b_out'] = int(line_tab[7])
else:
label_counters[label]['p_in'] += int(line_tab[4])
label_counters[label]['b_in'] += int(line_tab[5])
label_counters[label]['p_out'] += int(line_tab[6])
label_counters[label]['b_out'] += int(line_tab[7])
return label_counters
def main():
args = parse_args()
hostname = gethostname().replace('.', '_')
now = str(int(time.time()))
label_counters = parse_pf_labels()
for label in label_counters:
for key in (
('bytesIn', 'b_in'),
('bytesOut', 'b_out'),
('pktsIn', 'p_in'),
('pktsOut', 'p_out'),
):
print('{}.{}.{}.{} {} {}'.format(
args.prefix,
label, hostname, key[0],
label_counters[label][key[1]],
now,
))
if __name__ == '__main__':
main()
|
Add script for getting pf label counters#!/usr/bin/env python
"""igcollect - FreeBSD Packet Filter
Copyright (c) 2018 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser
from socket import gethostname
from subprocess import check_output
import re
import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='network')
return parser.parse_args()
def parse_pf_labels():
# Get pfctl result of "show all labels"
pfctl_result = check_output(['/sbin/pfctl', '-q', '-sl'])
label_counters = {}
# Read all lines
for line in pfctl_result.splitlines():
# Split each line by ' ', this gives us the label name and values
line_tab = line.split(' ')
# Cut unnecessary things out of label
label = line_tab[0].split(':')[0]
label = re.sub('_pub$', '', label)
label = re.sub('_loc$', '', label)
if label not in label_counters:
label_counters[label] = {}
label_counters[label]['p_in'] = int(line_tab[4])
label_counters[label]['b_in'] = int(line_tab[5])
label_counters[label]['p_out'] = int(line_tab[6])
label_counters[label]['b_out'] = int(line_tab[7])
else:
label_counters[label]['p_in'] += int(line_tab[4])
label_counters[label]['b_in'] += int(line_tab[5])
label_counters[label]['p_out'] += int(line_tab[6])
label_counters[label]['b_out'] += int(line_tab[7])
return label_counters
def main():
args = parse_args()
hostname = gethostname().replace('.', '_')
now = str(int(time.time()))
label_counters = parse_pf_labels()
for label in label_counters:
for key in (
('bytesIn', 'b_in'),
('bytesOut', 'b_out'),
('pktsIn', 'p_in'),
('pktsOut', 'p_out'),
):
print('{}.{}.{}.{} {} {}'.format(
args.prefix,
label, hostname, key[0],
label_counters[label][key[1]],
now,
))
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add script for getting pf label counters<commit_after>#!/usr/bin/env python
"""igcollect - FreeBSD Packet Filter
Copyright (c) 2018 InnoGames GmbH
"""
from __future__ import print_function
from argparse import ArgumentParser
from socket import gethostname
from subprocess import check_output
import re
import time
def parse_args():
parser = ArgumentParser()
parser.add_argument('--prefix', default='network')
return parser.parse_args()
def parse_pf_labels():
# Get pfctl result of "show all labels"
pfctl_result = check_output(['/sbin/pfctl', '-q', '-sl'])
label_counters = {}
# Read all lines
for line in pfctl_result.splitlines():
# Split each line by ' ', this gives us the label name and values
line_tab = line.split(' ')
# Cut unnecessary things out of label
label = line_tab[0].split(':')[0]
label = re.sub('_pub$', '', label)
label = re.sub('_loc$', '', label)
if label not in label_counters:
label_counters[label] = {}
label_counters[label]['p_in'] = int(line_tab[4])
label_counters[label]['b_in'] = int(line_tab[5])
label_counters[label]['p_out'] = int(line_tab[6])
label_counters[label]['b_out'] = int(line_tab[7])
else:
label_counters[label]['p_in'] += int(line_tab[4])
label_counters[label]['b_in'] += int(line_tab[5])
label_counters[label]['p_out'] += int(line_tab[6])
label_counters[label]['b_out'] += int(line_tab[7])
return label_counters
def main():
args = parse_args()
hostname = gethostname().replace('.', '_')
now = str(int(time.time()))
label_counters = parse_pf_labels()
for label in label_counters:
for key in (
('bytesIn', 'b_in'),
('bytesOut', 'b_out'),
('pktsIn', 'p_in'),
('pktsOut', 'p_out'),
):
print('{}.{}.{}.{} {} {}'.format(
args.prefix,
label, hostname, key[0],
label_counters[label][key[1]],
now,
))
if __name__ == '__main__':
main()
|
|
a841ff9195448529d988227a3cfc744d88c7682d
|
scripts/local_filestore_to_s3.py
|
scripts/local_filestore_to_s3.py
|
'''
This script copies all resource files from a local FileStore directory
to a remote S3 bucket.
**It will not work for group images**
It requires SQLAlchemy and Boto.
Please update the configuration details; all keys are mandatory except
AWS_STORAGE_PATH.
'''
import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Configuration
BASE_PATH = '/var/lib/ckan/default/resources'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/db'
AWS_ACCESS_KEY_ID = 'AKIxxxxxx'
AWS_SECRET_ACCESS_KEY = '+NGxxxxxx'
AWS_BUCKET_NAME = 'my-bucket'
AWS_STORAGE_PATH = 'some-path'
resource_ids_and_paths = {}
for root, dirs, files in os.walk(BASE_PATH):
if files:
resource_id = root.split('/')[-2] + root.split('/')[-1] + files[0]
resource_ids_and_paths[resource_id] = os.path.join(root, files[0])
print 'Found {0} resource files in the file system'.format(
len(resource_ids_and_paths.keys()))
engine = create_engine(SQLALCHEMY_URL)
connection = engine.connect()
resource_ids_and_names = {}
try:
for resource_id, file_path in resource_ids_and_paths.iteritems():
resource = connection.execute(text('''
SELECT id, url, url_type
FROM resource
WHERE id = :id
'''), id=resource_id)
if resource.rowcount:
_id, url, _type = resource.first()
if _type == 'upload' and url:
file_name = url.split('/')[-1] if '/' in url else url
resource_ids_and_names[_id] = file_name
finally:
connection.close()
engine.dispose()
print '{0} resources matched on the database'.format(
len(resource_ids_and_names.keys()))
s3_connection = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = s3_connection.get_bucket(AWS_BUCKET_NAME)
k = Key(bucket)
uploaded_resources = []
for resource_id, file_name in resource_ids_and_names.iteritems():
k.key = 'resources/{resource_id}/{file_name}'.format(
resource_id=resource_id, file_name=file_name)
if AWS_STORAGE_PATH:
k.key = AWS_STORAGE_PATH + '/' + k.key
k.set_contents_from_filename(resource_ids_and_paths[resource_id])
uploaded_resources.append(resource_id)
print 'Uploaded resource {0} ({1}) to S3'.format(resource_id, file_name)
print 'Done, uploaded {0} resources to S3'.format(len(uploaded_resources))
|
Add script for migrating local filestore to s3
|
Add script for migrating local filestore to s3
|
Python
|
agpl-3.0
|
okfn/ckanext-s3filestore,okfn/ckanext-s3filestore
|
Add script for migrating local filestore to s3
|
'''
This script copies all resource files from a local FileStore directory
to a remote S3 bucket.
**It will not work for group images**
It requires SQLAlchemy and Boto.
Please update the configuration details; all keys are mandatory except
AWS_STORAGE_PATH.
'''
import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Configuration
BASE_PATH = '/var/lib/ckan/default/resources'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/db'
AWS_ACCESS_KEY_ID = 'AKIxxxxxx'
AWS_SECRET_ACCESS_KEY = '+NGxxxxxx'
AWS_BUCKET_NAME = 'my-bucket'
AWS_STORAGE_PATH = 'some-path'
resource_ids_and_paths = {}
for root, dirs, files in os.walk(BASE_PATH):
if files:
resource_id = root.split('/')[-2] + root.split('/')[-1] + files[0]
resource_ids_and_paths[resource_id] = os.path.join(root, files[0])
print 'Found {0} resource files in the file system'.format(
len(resource_ids_and_paths.keys()))
engine = create_engine(SQLALCHEMY_URL)
connection = engine.connect()
resource_ids_and_names = {}
try:
for resource_id, file_path in resource_ids_and_paths.iteritems():
resource = connection.execute(text('''
SELECT id, url, url_type
FROM resource
WHERE id = :id
'''), id=resource_id)
if resource.rowcount:
_id, url, _type = resource.first()
if _type == 'upload' and url:
file_name = url.split('/')[-1] if '/' in url else url
resource_ids_and_names[_id] = file_name
finally:
connection.close()
engine.dispose()
print '{0} resources matched on the database'.format(
len(resource_ids_and_names.keys()))
s3_connection = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = s3_connection.get_bucket(AWS_BUCKET_NAME)
k = Key(bucket)
uploaded_resources = []
for resource_id, file_name in resource_ids_and_names.iteritems():
k.key = 'resources/{resource_id}/{file_name}'.format(
resource_id=resource_id, file_name=file_name)
if AWS_STORAGE_PATH:
k.key = AWS_STORAGE_PATH + '/' + k.key
k.set_contents_from_filename(resource_ids_and_paths[resource_id])
uploaded_resources.append(resource_id)
print 'Uploaded resource {0} ({1}) to S3'.format(resource_id, file_name)
print 'Done, uploaded {0} resources to S3'.format(len(uploaded_resources))
|
<commit_before><commit_msg>Add script for migrating local filestore to s3<commit_after>
|
'''
This script copies all resource files from a local FileStore directory
to a remote S3 bucket.
**It will not work for group images**
It requires SQLAlchemy and Boto.
Please update the configuration details; all keys are mandatory except
AWS_STORAGE_PATH.
'''
import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Configuration
BASE_PATH = '/var/lib/ckan/default/resources'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/db'
AWS_ACCESS_KEY_ID = 'AKIxxxxxx'
AWS_SECRET_ACCESS_KEY = '+NGxxxxxx'
AWS_BUCKET_NAME = 'my-bucket'
AWS_STORAGE_PATH = 'some-path'
resource_ids_and_paths = {}
for root, dirs, files in os.walk(BASE_PATH):
if files:
resource_id = root.split('/')[-2] + root.split('/')[-1] + files[0]
resource_ids_and_paths[resource_id] = os.path.join(root, files[0])
print 'Found {0} resource files in the file system'.format(
len(resource_ids_and_paths.keys()))
engine = create_engine(SQLALCHEMY_URL)
connection = engine.connect()
resource_ids_and_names = {}
try:
for resource_id, file_path in resource_ids_and_paths.iteritems():
resource = connection.execute(text('''
SELECT id, url, url_type
FROM resource
WHERE id = :id
'''), id=resource_id)
if resource.rowcount:
_id, url, _type = resource.first()
if _type == 'upload' and url:
file_name = url.split('/')[-1] if '/' in url else url
resource_ids_and_names[_id] = file_name
finally:
connection.close()
engine.dispose()
print '{0} resources matched on the database'.format(
len(resource_ids_and_names.keys()))
s3_connection = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = s3_connection.get_bucket(AWS_BUCKET_NAME)
k = Key(bucket)
uploaded_resources = []
for resource_id, file_name in resource_ids_and_names.iteritems():
k.key = 'resources/{resource_id}/{file_name}'.format(
resource_id=resource_id, file_name=file_name)
if AWS_STORAGE_PATH:
k.key = AWS_STORAGE_PATH + '/' + k.key
k.set_contents_from_filename(resource_ids_and_paths[resource_id])
uploaded_resources.append(resource_id)
print 'Uploaded resource {0} ({1}) to S3'.format(resource_id, file_name)
print 'Done, uploaded {0} resources to S3'.format(len(uploaded_resources))
|
Add script for migrating local filestore to s3'''
This script copies all resource files from a local FileStore directory
to a remote S3 bucket.
**It will not work for group images**
It requires SQLAlchemy and Boto.
Please update the configuration details; all keys are mandatory except
AWS_STORAGE_PATH.
'''
import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Configuration
BASE_PATH = '/var/lib/ckan/default/resources'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/db'
AWS_ACCESS_KEY_ID = 'AKIxxxxxx'
AWS_SECRET_ACCESS_KEY = '+NGxxxxxx'
AWS_BUCKET_NAME = 'my-bucket'
AWS_STORAGE_PATH = 'some-path'
resource_ids_and_paths = {}
for root, dirs, files in os.walk(BASE_PATH):
if files:
resource_id = root.split('/')[-2] + root.split('/')[-1] + files[0]
resource_ids_and_paths[resource_id] = os.path.join(root, files[0])
print 'Found {0} resource files in the file system'.format(
len(resource_ids_and_paths.keys()))
engine = create_engine(SQLALCHEMY_URL)
connection = engine.connect()
resource_ids_and_names = {}
try:
for resource_id, file_path in resource_ids_and_paths.iteritems():
resource = connection.execute(text('''
SELECT id, url, url_type
FROM resource
WHERE id = :id
'''), id=resource_id)
if resource.rowcount:
_id, url, _type = resource.first()
if _type == 'upload' and url:
file_name = url.split('/')[-1] if '/' in url else url
resource_ids_and_names[_id] = file_name
finally:
connection.close()
engine.dispose()
print '{0} resources matched on the database'.format(
len(resource_ids_and_names.keys()))
s3_connection = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = s3_connection.get_bucket(AWS_BUCKET_NAME)
k = Key(bucket)
uploaded_resources = []
for resource_id, file_name in resource_ids_and_names.iteritems():
k.key = 'resources/{resource_id}/{file_name}'.format(
resource_id=resource_id, file_name=file_name)
if AWS_STORAGE_PATH:
k.key = AWS_STORAGE_PATH + '/' + k.key
k.set_contents_from_filename(resource_ids_and_paths[resource_id])
uploaded_resources.append(resource_id)
print 'Uploaded resource {0} ({1}) to S3'.format(resource_id, file_name)
print 'Done, uploaded {0} resources to S3'.format(len(uploaded_resources))
|
<commit_before><commit_msg>Add script for migrating local filestore to s3<commit_after>'''
This script copies all resource files from a local FileStore directory
to a remote S3 bucket.
**It will not work for group images**
It requires SQLAlchemy and Boto.
Please update the configuration details; all keys are mandatory except
AWS_STORAGE_PATH.
'''
import os
from sqlalchemy import create_engine
from sqlalchemy.sql import text
from boto.s3.connection import S3Connection
from boto.s3.key import Key
# Configuration
BASE_PATH = '/var/lib/ckan/default/resources'
SQLALCHEMY_URL = 'postgresql://user:pass@localhost/db'
AWS_ACCESS_KEY_ID = 'AKIxxxxxx'
AWS_SECRET_ACCESS_KEY = '+NGxxxxxx'
AWS_BUCKET_NAME = 'my-bucket'
AWS_STORAGE_PATH = 'some-path'
resource_ids_and_paths = {}
for root, dirs, files in os.walk(BASE_PATH):
if files:
resource_id = root.split('/')[-2] + root.split('/')[-1] + files[0]
resource_ids_and_paths[resource_id] = os.path.join(root, files[0])
print 'Found {0} resource files in the file system'.format(
len(resource_ids_and_paths.keys()))
engine = create_engine(SQLALCHEMY_URL)
connection = engine.connect()
resource_ids_and_names = {}
try:
for resource_id, file_path in resource_ids_and_paths.iteritems():
resource = connection.execute(text('''
SELECT id, url, url_type
FROM resource
WHERE id = :id
'''), id=resource_id)
if resource.rowcount:
_id, url, _type = resource.first()
if _type == 'upload' and url:
file_name = url.split('/')[-1] if '/' in url else url
resource_ids_and_names[_id] = file_name
finally:
connection.close()
engine.dispose()
print '{0} resources matched on the database'.format(
len(resource_ids_and_names.keys()))
s3_connection = S3Connection(AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY)
bucket = s3_connection.get_bucket(AWS_BUCKET_NAME)
k = Key(bucket)
uploaded_resources = []
for resource_id, file_name in resource_ids_and_names.iteritems():
k.key = 'resources/{resource_id}/{file_name}'.format(
resource_id=resource_id, file_name=file_name)
if AWS_STORAGE_PATH:
k.key = AWS_STORAGE_PATH + '/' + k.key
k.set_contents_from_filename(resource_ids_and_paths[resource_id])
uploaded_resources.append(resource_id)
print 'Uploaded resource {0} ({1}) to S3'.format(resource_id, file_name)
print 'Done, uploaded {0} resources to S3'.format(len(uploaded_resources))
|
|
b68596cc80ac13544744004338602245d17bf6b2
|
Tests/feaLib/ast_test.py
|
Tests/feaLib/ast_test.py
|
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib import ast
import unittest
class AstTest(unittest.TestCase):
def test_glyphname_escape(self):
statement = ast.GlyphClass()
for name in ("BASE", "NULL", "foo", "a"):
statement.append(ast.GlyphName(name))
self.assertEqual(statement.asFea(), r"[\BASE \NULL foo a]")
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
Add an ast test for the previous commit
|
[feaLib] Add an ast test for the previous commit
|
Python
|
mit
|
googlefonts/fonttools,fonttools/fonttools
|
[feaLib] Add an ast test for the previous commit
|
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib import ast
import unittest
class AstTest(unittest.TestCase):
def test_glyphname_escape(self):
statement = ast.GlyphClass()
for name in ("BASE", "NULL", "foo", "a"):
statement.append(ast.GlyphName(name))
self.assertEqual(statement.asFea(), r"[\BASE \NULL foo a]")
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
<commit_before><commit_msg>[feaLib] Add an ast test for the previous commit<commit_after>
|
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib import ast
import unittest
class AstTest(unittest.TestCase):
def test_glyphname_escape(self):
statement = ast.GlyphClass()
for name in ("BASE", "NULL", "foo", "a"):
statement.append(ast.GlyphName(name))
self.assertEqual(statement.asFea(), r"[\BASE \NULL foo a]")
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
[feaLib] Add an ast test for the previous commitfrom __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib import ast
import unittest
class AstTest(unittest.TestCase):
def test_glyphname_escape(self):
statement = ast.GlyphClass()
for name in ("BASE", "NULL", "foo", "a"):
statement.append(ast.GlyphName(name))
self.assertEqual(statement.asFea(), r"[\BASE \NULL foo a]")
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
<commit_before><commit_msg>[feaLib] Add an ast test for the previous commit<commit_after>from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from fontTools.feaLib import ast
import unittest
class AstTest(unittest.TestCase):
def test_glyphname_escape(self):
statement = ast.GlyphClass()
for name in ("BASE", "NULL", "foo", "a"):
statement.append(ast.GlyphName(name))
self.assertEqual(statement.asFea(), r"[\BASE \NULL foo a]")
if __name__ == "__main__":
import sys
sys.exit(unittest.main())
|
|
05454d3a00b85ab21a16eb324546be102d85f778
|
osf/migrations/0103_set_osf_storage_node_settings_region.py
|
osf/migrations/0103_set_osf_storage_node_settings_region.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import migrations, models
from addons.osfstorage.models import NodeSettings, Region
from addons.osfstorage.settings import DEFAULT_REGION_ID, DEFAULT_REGION_NAME
from website.settings import WATERBUTLER_URL
logger = logging.getLogger(__name__)
osfstorage_config = apps.get_app_config('addons_osfstorage')
class Migration(migrations.Migration):
# Avoid locking the addons_osfstorage_nodesettings table
atomic = False
dependencies = [
('osf', '0102_merge_20180509_0846'),
]
def add_default_region_to_nodesettings(self, *args, **kwargs):
default_region, created = Region.objects.get_or_create(
_id=DEFAULT_REGION_ID,
name=DEFAULT_REGION_NAME,
waterbutler_credentials=osfstorage_config.WATERBUTLER_CREDENTIALS,
waterbutler_settings=osfstorage_config.WATERBUTLER_SETTINGS,
waterbutler_url=WATERBUTLER_URL
)
if created:
logger.info('Created default region: {}'.format(DEFAULT_REGION_NAME))
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=True)
.update(region=default_region))
logger.info(
'Updated addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
def unset_default_region(self, *args, **kwargs):
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=False)
.update(region=None))
logger.info(
'Unset addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
operations = [
migrations.RunPython(add_default_region_to_nodesettings, unset_default_region),
]
|
Add migration to set region on existing NodeSettings
|
Add migration to set region on existing NodeSettings
|
Python
|
apache-2.0
|
brianjgeiger/osf.io,mfraezz/osf.io,caseyrollins/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,HalcyonChimera/osf.io,pattisdr/osf.io,brianjgeiger/osf.io,aaxelb/osf.io,felliott/osf.io,icereval/osf.io,sloria/osf.io,adlius/osf.io,Johnetordoff/osf.io,sloria/osf.io,erinspace/osf.io,felliott/osf.io,adlius/osf.io,icereval/osf.io,mattclark/osf.io,adlius/osf.io,baylee-d/osf.io,CenterForOpenScience/osf.io,CenterForOpenScience/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,pattisdr/osf.io,mattclark/osf.io,felliott/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,Johnetordoff/osf.io,baylee-d/osf.io,adlius/osf.io,mfraezz/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,aaxelb/osf.io,caseyrollins/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,icereval/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,aaxelb/osf.io,brianjgeiger/osf.io,caseyrollins/osf.io,mfraezz/osf.io,saradbowman/osf.io,mattclark/osf.io,aaxelb/osf.io,saradbowman/osf.io,felliott/osf.io
|
Add migration to set region on existing NodeSettings
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import migrations, models
from addons.osfstorage.models import NodeSettings, Region
from addons.osfstorage.settings import DEFAULT_REGION_ID, DEFAULT_REGION_NAME
from website.settings import WATERBUTLER_URL
logger = logging.getLogger(__name__)
osfstorage_config = apps.get_app_config('addons_osfstorage')
class Migration(migrations.Migration):
# Avoid locking the addons_osfstorage_nodesettings table
atomic = False
dependencies = [
('osf', '0102_merge_20180509_0846'),
]
def add_default_region_to_nodesettings(self, *args, **kwargs):
default_region, created = Region.objects.get_or_create(
_id=DEFAULT_REGION_ID,
name=DEFAULT_REGION_NAME,
waterbutler_credentials=osfstorage_config.WATERBUTLER_CREDENTIALS,
waterbutler_settings=osfstorage_config.WATERBUTLER_SETTINGS,
waterbutler_url=WATERBUTLER_URL
)
if created:
logger.info('Created default region: {}'.format(DEFAULT_REGION_NAME))
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=True)
.update(region=default_region))
logger.info(
'Updated addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
def unset_default_region(self, *args, **kwargs):
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=False)
.update(region=None))
logger.info(
'Unset addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
operations = [
migrations.RunPython(add_default_region_to_nodesettings, unset_default_region),
]
|
<commit_before><commit_msg>Add migration to set region on existing NodeSettings<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import migrations, models
from addons.osfstorage.models import NodeSettings, Region
from addons.osfstorage.settings import DEFAULT_REGION_ID, DEFAULT_REGION_NAME
from website.settings import WATERBUTLER_URL
logger = logging.getLogger(__name__)
osfstorage_config = apps.get_app_config('addons_osfstorage')
class Migration(migrations.Migration):
# Avoid locking the addons_osfstorage_nodesettings table
atomic = False
dependencies = [
('osf', '0102_merge_20180509_0846'),
]
def add_default_region_to_nodesettings(self, *args, **kwargs):
default_region, created = Region.objects.get_or_create(
_id=DEFAULT_REGION_ID,
name=DEFAULT_REGION_NAME,
waterbutler_credentials=osfstorage_config.WATERBUTLER_CREDENTIALS,
waterbutler_settings=osfstorage_config.WATERBUTLER_SETTINGS,
waterbutler_url=WATERBUTLER_URL
)
if created:
logger.info('Created default region: {}'.format(DEFAULT_REGION_NAME))
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=True)
.update(region=default_region))
logger.info(
'Updated addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
def unset_default_region(self, *args, **kwargs):
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=False)
.update(region=None))
logger.info(
'Unset addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
operations = [
migrations.RunPython(add_default_region_to_nodesettings, unset_default_region),
]
|
Add migration to set region on existing NodeSettings# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import connection, migrations, models
from addons.osfstorage.models import NodeSettings, Region
from addons.osfstorage.settings import DEFAULT_REGION_ID, DEFAULT_REGION_NAME
from website.settings import WATERBUTLER_URL
logger = logging.getLogger(__name__)
osfstorage_config = apps.get_app_config('addons_osfstorage')
class Migration(migrations.Migration):
# Avoid locking the addons_osfstorage_nodesettings table
atomic = False
dependencies = [
('osf', '0102_merge_20180509_0846'),
]
def add_default_region_to_nodesettings(self, *args, **kwargs):
default_region, created = Region.objects.get_or_create(
_id=DEFAULT_REGION_ID,
name=DEFAULT_REGION_NAME,
waterbutler_credentials=osfstorage_config.WATERBUTLER_CREDENTIALS,
waterbutler_settings=osfstorage_config.WATERBUTLER_SETTINGS,
waterbutler_url=WATERBUTLER_URL
)
if created:
logger.info('Created default region: {}'.format(DEFAULT_REGION_NAME))
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=True)
.update(region=default_region))
logger.info(
'Updated addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
def unset_default_region(self, *args, **kwargs):
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=False)
.update(region=None))
logger.info(
'Unset addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
operations = [
migrations.RunPython(add_default_region_to_nodesettings, unset_default_region),
]
|
<commit_before><commit_msg>Add migration to set region on existing NodeSettings<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-30 18:34
from __future__ import unicode_literals
import logging
from django.apps import apps
from django.db import connection, migrations, models
from addons.osfstorage.models import NodeSettings, Region
from addons.osfstorage.settings import DEFAULT_REGION_ID, DEFAULT_REGION_NAME
from website.settings import WATERBUTLER_URL
logger = logging.getLogger(__name__)
osfstorage_config = apps.get_app_config('addons_osfstorage')
class Migration(migrations.Migration):
# Avoid locking the addons_osfstorage_nodesettings table
atomic = False
dependencies = [
('osf', '0102_merge_20180509_0846'),
]
def add_default_region_to_nodesettings(self, *args, **kwargs):
default_region, created = Region.objects.get_or_create(
_id=DEFAULT_REGION_ID,
name=DEFAULT_REGION_NAME,
waterbutler_credentials=osfstorage_config.WATERBUTLER_CREDENTIALS,
waterbutler_settings=osfstorage_config.WATERBUTLER_SETTINGS,
waterbutler_url=WATERBUTLER_URL
)
if created:
logger.info('Created default region: {}'.format(DEFAULT_REGION_NAME))
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=True)
.update(region=default_region))
logger.info(
'Updated addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
def unset_default_region(self, *args, **kwargs):
BATCHSIZE = 5000
max_pk = NodeSettings.objects.aggregate(models.Max('pk'))['pk__max']
if max_pk is not None:
for offset in range(0, max_pk + 1, BATCHSIZE):
(NodeSettings.objects
.filter(pk__gte=offset)
.filter(pk__lt=offset + BATCHSIZE)
.filter(region__isnull=False)
.update(region=None))
logger.info(
'Unset addons_osfstorage_nodesettings {}-{}/{}'.format(
offset,
offset + BATCHSIZE,
max_pk,
)
)
operations = [
migrations.RunPython(add_default_region_to_nodesettings, unset_default_region),
]
|
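The migration above walks addons_osfstorage_nodesettings in fixed-size primary-key windows instead of issuing one giant UPDATE, which keeps any single statement (and therefore any lock) short; with atomic = False each window can commit on its own. A minimal standalone sketch of that batching pattern, using a hypothetical Django model and field that are not part of the original change:

from django.db import models

def backfill_in_batches(model_cls, batch_size=5000, **updates):
    # Apply model_cls.objects.update(**updates) in pk windows so no single
    # statement touches the whole table at once.
    max_pk = model_cls.objects.aggregate(models.Max('pk'))['pk__max']
    if max_pk is None:
        return  # empty table, nothing to backfill
    for offset in range(0, max_pk + 1, batch_size):
        (model_cls.objects
         .filter(pk__gte=offset, pk__lt=offset + batch_size)
         .update(**updates))

# e.g. backfill_in_batches(Item, flag=True), where Item is any model with a
# nullable "flag" field; both names here are purely illustrative.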
|
24a90be97f04cdc52c6a72e835c903c7de297465
|
src/algorithms/tests/change_file_formats.py
|
src/algorithms/tests/change_file_formats.py
|
from unittest import TestCase
from algorithms.tests import TEST_FILE_PATH
from os.path import join
class ChangeFileFormatTests(TestCase):
def test_convert_arff_to_csv(self):
source = join(TEST_FILE_PATH, 'pauksciai.arff')
expected = join(TEST_FILE_PATH, 'pauksciai.csv')
def test_convert_csv_to_arff(self):
pass
def test_convert_xml_to_csv(self):
pass
def test_convert_csv_to_xml(self):
pass
|
Add empty tests for file format changing.
|
Add empty tests for file format changing.
|
Python
|
agpl-3.0
|
InScience/DAMIS-old,InScience/DAMIS-old
|
Add empty tests for file format changing.
|
from unittest import TestCase
from algorithms.tests import TEST_FILE_PATH
from os.path import join
class ChangeFileFormatTests(TestCase):
def test_convert_arff_to_csv(self):
source = join(TEST_FILE_PATH, 'pauksciai.arff')
expected = join(TEST_FILE_PATH, 'pauksciai.csv')
def test_convert_csv_to_arff(self):
pass
def test_convert_xml_to_csv(self):
pass
def test_convert_csv_to_xml(self):
pass
|
<commit_before><commit_msg>Add empty tests for file format changing.<commit_after>
|
from unittest import TestCase
from algorithms.tests import TEST_FILE_PATH
from os.path import join
class ChangeFileFormatTests(TestCase):
def test_convert_arff_to_csv(self):
source = join(TEST_FILE_PATH, 'pauksciai.arff')
expected = join(TEST_FILE_PATH, 'pauksciai.csv')
def test_convert_csv_to_arff(self):
pass
def test_convert_xml_to_csv(self):
pass
def test_convert_csv_to_xml(self):
pass
|
Add empty tests for file format changing.from unittest import TestCase
from algorithms.tests import TEST_FILE_PATH
from os.path import join
class ChangeFileFormatTests(TestCase):
def test_convert_arff_to_csv(self):
source = join(TEST_FILE_PATH, 'pauksciai.arff')
expected = join(TEST_FILE_PATH, 'pauksciai.csv')
def test_convert_csv_to_arff(self):
pass
def test_convert_xml_to_csv(self):
pass
def test_convert_csv_to_xml(self):
pass
|
<commit_before><commit_msg>Add empty tests for file format changing.<commit_after>from unittest import TestCase
from algorithms.tests import TEST_FILE_PATH
from os.path import join
class ChangeFileFormatTests(TestCase):
def test_convert_arff_to_csv(self):
source = join(TEST_FILE_PATH, 'pauksciai.arff')
expected = join(TEST_FILE_PATH, 'pauksciai.csv')
def test_convert_csv_to_arff(self):
pass
def test_convert_xml_to_csv(self):
pass
def test_convert_csv_to_xml(self):
pass
|
|
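For the first placeholder above, a converter only has to turn the @attribute declarations of pauksciai.arff into a CSV header row and copy the @data rows through. The sketch below is illustrative (arff_to_csv is not an existing function in this project, and it ignores quoted values and sparse ARFF):

import csv

def arff_to_csv(arff_path, csv_path):
    attributes, rows, in_data = [], [], False
    with open(arff_path) as src:
        for line in src:
            line = line.strip()
            if not line or line.startswith('%'):
                continue  # skip blanks and ARFF comments
            lowered = line.lower()
            if lowered.startswith('@attribute'):
                attributes.append(line.split()[1])
            elif lowered.startswith('@data'):
                in_data = True
            elif in_data:
                rows.append(line.split(','))
    with open(csv_path, 'w', newline='') as dst:
        writer = csv.writer(dst)
        writer.writerow(attributes)
        writer.writerows(rows)

A test like test_convert_arff_to_csv could then compare the produced file against the expected pauksciai.csv line by line.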
59097ff3523926d70ec267bb96e015232d6d74c0
|
jqm-all/checkHeader.py
|
jqm-all/checkHeader.py
|
#!/usr/bin/env python2
# coding:utf-8
import os
import re
import shutil
JQM_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
tmpFilePath = os.path.join(JQM_ROOT_DIR, "__tmp_file.java")
HEADER = """/**
* Copyright © 2013 enioka. All rights reserved
%s *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
AUTHOR = re.compile(".* \(\S+@\S+\.\S+\)")
if __name__ == "__main__":
for dirpath, dirnames, filenames in os.walk(JQM_ROOT_DIR):
for filename in filenames:
if filename.endswith(".java"):
authors = []
path = os.path.join(dirpath, filename)
tmp = open(tmpFilePath, "w")
inHeader = True
for line in open(path, "r"):
if inHeader:
if line.startswith("/*") or line.startswith(" *"):
# print "reading header: %s " % line.strip()
if AUTHOR.match(line):
authors.append(line)
# print line
else:
# print "End of header %s" % line.strip()
inHeader = False
tmp.write(HEADER % "".join(authors))
tmp.write(line)
else:
tmp.write(line)
tmp.close()
shutil.copy(tmpFilePath, path)
os.unlink(tmpFilePath)
|
Add script that define java source code standard header
|
Add script that define java source code standard header
|
Python
|
apache-2.0
|
enioka/jqm,enioka/jqm,enioka/jqm,enioka/jqm,enioka/jqm
|
Add script that define java source code standard header
|
#!/usr/bin/env python2
# coding:utf-8
import os
import re
import shutil
JQM_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
tmpFilePath = os.path.join(JQM_ROOT_DIR, "__tmp_file.java")
HEADER = """/**
* Copyright © 2013 enioka. All rights reserved
%s *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
AUTHOR = re.compile(".* \(\S+@\S+\.\S+\)")
if __name__ == "__main__":
for dirpath, dirnames, filenames in os.walk(JQM_ROOT_DIR):
for filename in filenames:
if filename.endswith(".java"):
authors = []
path = os.path.join(dirpath, filename)
tmp = open(tmpFilePath, "w")
inHeader = True
for line in open(path, "r"):
if inHeader:
if line.startswith("/*") or line.startswith(" *"):
# print "reading header: %s " % line.strip()
if AUTHOR.match(line):
authors.append(line)
# print line
else:
# print "End of header %s" % line.strip()
inHeader = False
tmp.write(HEADER % "".join(authors))
tmp.write(line)
else:
tmp.write(line)
tmp.close()
shutil.copy(tmpFilePath, path)
os.unlink(tmpFilePath)
|
<commit_before><commit_msg>Add script that define java source code standard header<commit_after>
|
#!/usr/bin/env python2
# coding:utf-8
import os
import re
import shutil
JQM_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
tmpFilePath = os.path.join(JQM_ROOT_DIR, "__tmp_file.java")
HEADER = """/**
* Copyright © 2013 enioka. All rights reserved
%s *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
AUTHOR = re.compile(".* \(\S+@\S+\.\S+\)")
if __name__ == "__main__":
for dirpath, dirnames, filenames in os.walk(JQM_ROOT_DIR):
for filename in filenames:
if filename.endswith(".java"):
authors = []
path = os.path.join(dirpath, filename)
tmp = open(tmpFilePath, "w")
inHeader = True
for line in open(path, "r"):
if inHeader:
if line.startswith("/*") or line.startswith(" *"):
# print "reading header: %s " % line.strip()
if AUTHOR.match(line):
authors.append(line)
# print line
else:
# print "End of header %s" % line.strip()
inHeader = False
tmp.write(HEADER % "".join(authors))
tmp.write(line)
else:
tmp.write(line)
tmp.close()
shutil.copy(tmpFilePath, path)
os.unlink(tmpFilePath)
|
Add script that define java source code standard header#!/usr/bin/env python2
# coding:utf-8
import os
import re
import shutil
JQM_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
tmpFilePath = os.path.join(JQM_ROOT_DIR, "__tmp_file.java")
HEADER = """/**
* Copyright © 2013 enioka. All rights reserved
%s *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
AUTHOR = re.compile(".* \(\S+@\S+\.\S+\)")
if __name__ == "__main__":
for dirpath, dirnames, filenames in os.walk(JQM_ROOT_DIR):
for filename in filenames:
if filename.endswith(".java"):
authors = []
path = os.path.join(dirpath, filename)
tmp = open(tmpFilePath, "w")
inHeader = True
for line in open(path, "r"):
if inHeader:
if line.startswith("/*") or line.startswith(" *"):
# print "reading header: %s " % line.strip()
if AUTHOR.match(line):
authors.append(line)
# print line
else:
# print "End of header %s" % line.strip()
inHeader = False
tmp.write(HEADER % "".join(authors))
tmp.write(line)
else:
tmp.write(line)
tmp.close()
shutil.copy(tmpFilePath, path)
os.unlink(tmpFilePath)
|
<commit_before><commit_msg>Add script that define java source code standard header<commit_after>#!/usr/bin/env python2
# coding:utf-8
import os
import re
import shutil
JQM_ROOT_DIR = os.path.abspath(os.path.dirname(__file__))
tmpFilePath = os.path.join(JQM_ROOT_DIR, "__tmp_file.java")
HEADER = """/**
* Copyright © 2013 enioka. All rights reserved
%s *
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
"""
AUTHOR = re.compile(".* \(\S+@\S+\.\S+\)")
if __name__ == "__main__":
for dirpath, dirnames, filenames in os.walk(JQM_ROOT_DIR):
for filename in filenames:
if filename.endswith(".java"):
authors = []
path = os.path.join(dirpath, filename)
tmp = open(tmpFilePath, "w")
inHeader = True
for line in open(path, "r"):
if inHeader:
if line.startswith("/*") or line.startswith(" *"):
# print "reading header: %s " % line.strip()
if AUTHOR.match(line):
authors.append(line)
# print line
else:
# print "End of header %s" % line.strip()
inHeader = False
tmp.write(HEADER % "".join(authors))
tmp.write(line)
else:
tmp.write(line)
tmp.close()
shutil.copy(tmpFilePath, path)
os.unlink(tmpFilePath)
|
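The script keeps any existing author lines that match AUTHOR when it rewrites a header, so that regex decides which lines survive. A tiny self-contained check of the pattern (the sample strings are invented):

import re

AUTHOR = re.compile(r".* \(\S+@\S+\.\S+\)")

samples = [
    " * Jane Doe (jane@example.com)",   # kept: name followed by (email)
    " * Copyright 2013 enioka",         # dropped: no email in parentheses
]
for line in samples:
    print("%-40s -> %s" % (line, bool(AUTHOR.match(line))))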
|
513d8a37d612253b87f0d30d3cab42ba25e98dcf
|
migrations/versions/8cf43589ca8b_add_email_address_in_account_table.py
|
migrations/versions/8cf43589ca8b_add_email_address_in_account_table.py
|
"""Add email address in Account Table for sending mailers.
Revision ID: 8cf43589ca8b
Revises: 3828e380de20
Create Date: 2018-08-28 12:47:31.858127
"""
# revision identifiers, used by Alembic.
revision = '8cf43589ca8b'
down_revision = '3828e380de20'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('email_address', sa.String(length=512), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'email_address')
# ### end Alembic commands ###
|
Add db migration for adding email address field in Account Table
|
Add db migration for adding email address field in Account Table
|
Python
|
apache-2.0
|
stackArmor/security_monkey,stackArmor/security_monkey,stackArmor/security_monkey,stackArmor/security_monkey,stackArmor/security_monkey
|
Add db migration for adding email address field in Account Table
|
"""Add email address in Account Table for sending mailers.
Revision ID: 8cf43589ca8b
Revises: 3828e380de20
Create Date: 2018-08-28 12:47:31.858127
"""
# revision identifiers, used by Alembic.
revision = '8cf43589ca8b'
down_revision = '3828e380de20'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('email_address', sa.String(length=512), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'email_address')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration for adding email address field in Account Table<commit_after>
|
"""Add email address in Account Table for sending mailers.
Revision ID: 8cf43589ca8b
Revises: 3828e380de20
Create Date: 2018-08-28 12:47:31.858127
"""
# revision identifiers, used by Alembic.
revision = '8cf43589ca8b'
down_revision = '3828e380de20'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('email_address', sa.String(length=512), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'email_address')
# ### end Alembic commands ###
|
Add db migration for adding email address field in Account Table"""Add email address in Account Table for sending mailers.
Revision ID: 8cf43589ca8b
Revises: 3828e380de20
Create Date: 2018-08-28 12:47:31.858127
"""
# revision identifiers, used by Alembic.
revision = '8cf43589ca8b'
down_revision = '3828e380de20'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('email_address', sa.String(length=512), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'email_address')
# ### end Alembic commands ###
|
<commit_before><commit_msg>Add db migration for adding email address field in Account Table<commit_after>"""Add email address in Account Table for sending mailers.
Revision ID: 8cf43589ca8b
Revises: 3828e380de20
Create Date: 2018-08-28 12:47:31.858127
"""
# revision identifiers, used by Alembic.
revision = '8cf43589ca8b'
down_revision = '3828e380de20'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('account', sa.Column('email_address', sa.String(length=512), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('account', 'email_address')
# ### end Alembic commands ###
|
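The upgrade above only changes the schema; the ORM side has to declare the same column before application code can read it. A hedged sketch of the matching declaration (this Account class is illustrative, not security_monkey's real model):

import sqlalchemy as sa
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Account(Base):
    __tablename__ = 'account'
    id = sa.Column(sa.Integer, primary_key=True)
    # mirrors the column added by revision 8cf43589ca8b
    email_address = sa.Column(sa.String(512), nullable=True)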
|
c967c86e9dd4ae6ec85049a062cd3155a905268a
|
rest_framework_social_oauth2/management/commands/createapp.py
|
rest_framework_social_oauth2/management/commands/createapp.py
|
from django.core.management.base import BaseCommand, CommandError
from oauth2_provider.models import Application
from django.contrib.auth.models import User
from oauth2_provider.generators import generate_client_id, generate_client_secret
class Command(BaseCommand):
help = "Create a Django OAuth Toolkit application (an existing admin is required)"
def add_arguments(self, parser):
parser.add_argument(
"-ci", "--client_id",
help="Client ID (recommeded 40 characters long)"
)
parser.add_argument(
"-cs", "--client_secret",
help="Client Secret (recommeded 128 characters long)"
)
parser.add_argument(
"-n", "--name",
help="Name for the application"
)
def handle(self, *args, **options):
new_application = Application(
user= User.objects.filter(is_superuser=True)[0],
client_type="confidential",
authorization_grant_type="password",
name=options["name"] or "socialauth_application",
client_id=options["client_id"] or generate_client_id(),
client_secret=options["client_secret"] or generate_client_secret(),
)
new_application.save()
|
Create manage.py command to create an application
|
Create manage.py command to create an application
|
Python
|
mit
|
PhilipGarnero/django-rest-framework-social-oauth2
|
Create manage.py command to create an application
|
from django.core.management.base import BaseCommand, CommandError
from oauth2_provider.models import Application
from django.contrib.auth.models import User
from oauth2_provider.generators import generate_client_id, generate_client_secret
class Command(BaseCommand):
help = "Create a Django OAuth Toolkit application (an existing admin is required)"
def add_arguments(self, parser):
parser.add_argument(
"-ci", "--client_id",
help="Client ID (recommeded 40 characters long)"
)
parser.add_argument(
"-cs", "--client_secret",
help="Client Secret (recommeded 128 characters long)"
)
parser.add_argument(
"-n", "--name",
help="Name for the application"
)
def handle(self, *args, **options):
new_application = Application(
user= User.objects.filter(is_superuser=True)[0],
client_type="confidential",
authorization_grant_type="password",
name=options["name"] or "socialauth_application",
client_id=options["client_id"] or generate_client_id(),
client_secret=options["client_secret"] or generate_client_secret(),
)
new_application.save()
|
<commit_before><commit_msg>Create manage.py command to create an application<commit_after>
|
from django.core.management.base import BaseCommand, CommandError
from oauth2_provider.models import Application
from django.contrib.auth.models import User
from oauth2_provider.generators import generate_client_id, generate_client_secret
class Command(BaseCommand):
help = "Create a Django OAuth Toolkit application (an existing admin is required)"
def add_arguments(self, parser):
parser.add_argument(
"-ci", "--client_id",
help="Client ID (recommeded 40 characters long)"
)
parser.add_argument(
"-cs", "--client_secret",
help="Client Secret (recommeded 128 characters long)"
)
parser.add_argument(
"-n", "--name",
help="Name for the application"
)
def handle(self, *args, **options):
new_application = Application(
user= User.objects.filter(is_superuser=True)[0],
client_type="confidential",
authorization_grant_type="password",
name=options["name"] or "socialauth_application",
client_id=options["client_id"] or generate_client_id(),
client_secret=options["client_secret"] or generate_client_secret(),
)
new_application.save()
|
Create manage.py command to create an applicationfrom django.core.management.base import BaseCommand, CommandError
from oauth2_provider.models import Application
from django.contrib.auth.models import User
from oauth2_provider.generators import generate_client_id, generate_client_secret
class Command(BaseCommand):
help = "Create a Django OAuth Toolkit application (an existing admin is required)"
def add_arguments(self, parser):
parser.add_argument(
"-ci", "--client_id",
help="Client ID (recommeded 40 characters long)"
)
parser.add_argument(
"-cs", "--client_secret",
help="Client Secret (recommeded 128 characters long)"
)
parser.add_argument(
"-n", "--name",
help="Name for the application"
)
def handle(self, *args, **options):
new_application = Application(
user= User.objects.filter(is_superuser=True)[0],
client_type="confidential",
authorization_grant_type="password",
name=options["name"] or "socialauth_application",
client_id=options["client_id"] or generate_client_id(),
client_secret=options["client_secret"] or generate_client_secret(),
)
new_application.save()
|
<commit_before><commit_msg>Create manage.py command to create an application<commit_after>from django.core.management.base import BaseCommand, CommandError
from oauth2_provider.models import Application
from django.contrib.auth.models import User
from oauth2_provider.generators import generate_client_id, generate_client_secret
class Command(BaseCommand):
help = "Create a Django OAuth Toolkit application (an existing admin is required)"
def add_arguments(self, parser):
parser.add_argument(
"-ci", "--client_id",
help="Client ID (recommeded 40 characters long)"
)
parser.add_argument(
"-cs", "--client_secret",
help="Client Secret (recommeded 128 characters long)"
)
parser.add_argument(
"-n", "--name",
help="Name for the application"
)
def handle(self, *args, **options):
new_application = Application(
user= User.objects.filter(is_superuser=True)[0],
client_type="confidential",
authorization_grant_type="password",
name=options["name"] or "socialauth_application",
client_id=options["client_id"] or generate_client_id(),
client_secret=options["client_secret"] or generate_client_secret(),
)
new_application.save()
|
|
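Once installed, the command runs like any other manage.py command; because handle() picks User.objects.filter(is_superuser=True)[0], at least one superuser has to exist first. A short illustrative invocation from Python (the application name is made up):

from django.core.management import call_command

# equivalent to: python manage.py createapp --name my_mobile_app
call_command('createapp', name='my_mobile_app')
# client_id / client_secret are generated by oauth2_provider when omitted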
05ce8e0ff96b0d283cedfc0058a06234bb4d0630
|
scripts/py36-blake2.py
|
scripts/py36-blake2.py
|
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""
import hashlib
import sys
def test_b2(b2_input, b2_output):
digest = hashlib.blake2b(b2_input, digest_size=32).digest()
identical = b2_output == digest
print('Input: ', b2_input.hex())
print('Expected: ', b2_output.hex())
print('Calculated:', digest.hex())
print('Identical: ', identical)
print()
if not identical:
sys.exit(1)
test_b2(
bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
b'abc',
bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
|
Add test script for blake2b_256 against CPython 3.6 hashlib
|
Add test script for blake2b_256 against CPython 3.6 hashlib
|
Python
|
bsd-3-clause
|
edgewood/borg,RonnyPfannschmidt/borg,raxenak/borg,edgimar/borg,RonnyPfannschmidt/borg,edgewood/borg,RonnyPfannschmidt/borg,edgewood/borg,raxenak/borg,RonnyPfannschmidt/borg,edgimar/borg,edgimar/borg,RonnyPfannschmidt/borg,edgewood/borg,raxenak/borg,raxenak/borg,edgimar/borg
|
Add test script for blake2b_256 against CPython 3.6 hashlib
|
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""
import hashlib
import sys
def test_b2(b2_input, b2_output):
digest = hashlib.blake2b(b2_input, digest_size=32).digest()
identical = b2_output == digest
print('Input: ', b2_input.hex())
print('Expected: ', b2_output.hex())
print('Calculated:', digest.hex())
print('Identical: ', identical)
print()
if not identical:
sys.exit(1)
test_b2(
bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
b'abc',
bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
|
<commit_before><commit_msg>Add test script for blake2b_256 against CPython 3.6 hashlib<commit_after>
|
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""
import hashlib
import sys
def test_b2(b2_input, b2_output):
digest = hashlib.blake2b(b2_input, digest_size=32).digest()
identical = b2_output == digest
print('Input: ', b2_input.hex())
print('Expected: ', b2_output.hex())
print('Calculated:', digest.hex())
print('Identical: ', identical)
print()
if not identical:
sys.exit(1)
test_b2(
bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
b'abc',
bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
|
Add test script for blake2b_256 against CPython 3.6 hashlib
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""
import hashlib
import sys
def test_b2(b2_input, b2_output):
digest = hashlib.blake2b(b2_input, digest_size=32).digest()
identical = b2_output == digest
print('Input: ', b2_input.hex())
print('Expected: ', b2_output.hex())
print('Calculated:', digest.hex())
print('Identical: ', identical)
print()
if not identical:
sys.exit(1)
test_b2(
bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
b'abc',
bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
|
<commit_before><commit_msg>Add test script for blake2b_256 against CPython 3.6 hashlib<commit_after>
"""
This script checks compatibility of crypto.blake2b_256 against hashlib.blake2b in CPython 3.6.
"""
import hashlib
import sys
def test_b2(b2_input, b2_output):
digest = hashlib.blake2b(b2_input, digest_size=32).digest()
identical = b2_output == digest
print('Input: ', b2_input.hex())
print('Expected: ', b2_output.hex())
print('Calculated:', digest.hex())
print('Identical: ', identical)
print()
if not identical:
sys.exit(1)
test_b2(
bytes.fromhex('037fb9b75b20d623f1d5a568050fccde4a1b7c5f5047432925e941a17c7a2d0d7061796c6f6164'),
bytes.fromhex('a22d4fc81bb61c3846c334a09eaf28d22dd7df08c9a7a41e713ef28d80eebd45')
)
test_b2(
b'abc',
bytes.fromhex('bddd813c634239723171ef3fee98579b94964e3bb1cb3e427262c8c068d52319')
)
test_b2(
bytes.fromhex('e944973af2256d4d670c12dd75304c319f58f4e40df6fb18ef996cb47e063676') + b'1234567890' * 100,
bytes.fromhex('97ede832378531dd0f4c668685d166e797da27b47d8cd441e885b60abd5e0cb2'),
)
|
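The same hashlib construction also works incrementally, which matters when the input does not fit in memory. A small sketch (the path argument is a placeholder):

import hashlib

def blake2b_256_file(path, chunk_size=1 << 20):
    # Stream the file through blake2b with a 32-byte digest, 1 MiB at a time.
    h = hashlib.blake2b(digest_size=32)
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()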
|
6baa4144bd7fadf0cb09fb404b2d0aad87b944ec
|
alembic/versions/537db2979434_add_category_constraint_to_app_project.py
|
alembic/versions/537db2979434_add_category_constraint_to_app_project.py
|
"""Add category constraint to app/project
Revision ID: 537db2979434
Revises: 7927d63d556
Create Date: 2014-09-25 10:39:57.300726
"""
# revision identifiers, used by Alembic.
revision = '537db2979434'
down_revision = '7927d63d556'
from alembic import op
import sqlalchemy as sa
def upgrade():
query = 'UPDATE app SET category_id=(SELECT id FROM category ORDER BY id asc limit 1) WHERE app.category_id is NULL;'
op.execute(query)
op.alter_column('app', 'category_id', nullable=False)
def downgrade():
op.alter_column('app', 'category_id', nullable=True)
|
Add alembic revision for category constraint in project
|
Add alembic revision for category constraint in project
|
Python
|
agpl-3.0
|
jean/pybossa,inteligencia-coletiva-lsd/pybossa,geotagx/pybossa,Scifabric/pybossa,harihpr/tweetclickers,inteligencia-coletiva-lsd/pybossa,stefanhahmann/pybossa,PyBossa/pybossa,OpenNewsLabs/pybossa,geotagx/pybossa,PyBossa/pybossa,jean/pybossa,stefanhahmann/pybossa,harihpr/tweetclickers,Scifabric/pybossa,OpenNewsLabs/pybossa
|
Add alembic revision for category constraint in project
|
"""Add category constraint to app/project
Revision ID: 537db2979434
Revises: 7927d63d556
Create Date: 2014-09-25 10:39:57.300726
"""
# revision identifiers, used by Alembic.
revision = '537db2979434'
down_revision = '7927d63d556'
from alembic import op
import sqlalchemy as sa
def upgrade():
query = 'UPDATE app SET category_id=(SELECT id FROM category ORDER BY id asc limit 1) WHERE app.category_id is NULL;'
op.execute(query)
op.alter_column('app', 'category_id', nullable=False)
def downgrade():
op.alter_column('app', 'category_id', nullable=True)
|
<commit_before><commit_msg>Add alembic revision for category constraint in project<commit_after>
|
"""Add category constraint to app/project
Revision ID: 537db2979434
Revises: 7927d63d556
Create Date: 2014-09-25 10:39:57.300726
"""
# revision identifiers, used by Alembic.
revision = '537db2979434'
down_revision = '7927d63d556'
from alembic import op
import sqlalchemy as sa
def upgrade():
query = 'UPDATE app SET category_id=(SELECT id FROM category ORDER BY id asc limit 1) WHERE app.category_id is NULL;'
op.execute(query)
op.alter_column('app', 'category_id', nullable=False)
def downgrade():
op.alter_column('app', 'category_id', nullable=True)
|
Add alembic revision for category constraint in project"""Add category constraint to app/project
Revision ID: 537db2979434
Revises: 7927d63d556
Create Date: 2014-09-25 10:39:57.300726
"""
# revision identifiers, used by Alembic.
revision = '537db2979434'
down_revision = '7927d63d556'
from alembic import op
import sqlalchemy as sa
def upgrade():
query = 'UPDATE app SET category_id=(SELECT id FROM category ORDER BY id asc limit 1) WHERE app.category_id is NULL;'
op.execute(query)
op.alter_column('app', 'category_id', nullable=False)
def downgrade():
op.alter_column('app', 'category_id', nullable=True)
|
<commit_before><commit_msg>Add alembic revision for category constraint in project<commit_after>"""Add category constraint to app/project
Revision ID: 537db2979434
Revises: 7927d63d556
Create Date: 2014-09-25 10:39:57.300726
"""
# revision identifiers, used by Alembic.
revision = '537db2979434'
down_revision = '7927d63d556'
from alembic import op
import sqlalchemy as sa
def upgrade():
query = 'UPDATE app SET category_id=(SELECT id FROM category ORDER BY id asc limit 1) WHERE app.category_id is NULL;'
op.execute(query)
op.alter_column('app', 'category_id', nullable=False)
def downgrade():
op.alter_column('app', 'category_id', nullable=True)
|
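The revision follows the usual two-step shape for promoting a nullable column: backfill the NULL rows first, then tighten the constraint. The same pattern written generically (table, column and fallback value below are placeholders, not part of pybossa):

from alembic import op

def upgrade():
    # 1. give every existing NULL row a value the constraint will accept
    op.execute("UPDATE widget SET owner_id = 0 WHERE owner_id IS NULL")
    # 2. only then make the column mandatory
    op.alter_column('widget', 'owner_id', nullable=False)

def downgrade():
    op.alter_column('widget', 'owner_id', nullable=True)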
|
ed1a2c227ca7e83418d5741116e34962ce9c0039
|
data/visualizations.py
|
data/visualizations.py
|
import csv
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel
from scipy.interpolate import spline
def list_str_to_int(input):
str_hold = "".join(input)
return int(str_hold)
def time_series_avg_wind_speeds(curr_windset):
""" avg wind speed (per day) forecast plots for a single wind farm
"""
time_series = []
wind_speeds = []
prev = None
curr_date_speeds = []
for row in curr_windset:
if row[0] != 'date':
date = row[0]
wind_speed = row[5]
date_arr = list(date)
year = list_str_to_int(date_arr[0:4])
month = list_str_to_int(date_arr[4:6])
time_series_entry = datetime(year, month, 1)
if wind_speed != 'NA':
if (time_series_entry != prev) and (prev != None):
avg_wind_speed = np.mean(curr_date_speeds)
wind_speeds.append(avg_wind_speed)
time_series.append(time_series_entry)
curr_date_speeds = []
else:
curr_date_speeds.append(float(wind_speed))
# print curr_date_speeds
prev = time_series_entry
plt.plot(time_series, wind_speeds)
plt.savefig('plots/'+str(sys.argv[1] + '_avg.pdf'))
plt.show()
if __name__ == '__main__':
curr_windset = csv.reader(open(sys.argv[1], 'r'))
time_series_avg_wind_speeds(curr_windset)
|
Add initial matploblib average plots for wind speed.
|
Add initial matploblib average plots for wind speed.
|
Python
|
mit
|
avishek1013/windly
|
Add initial matploblib average plots for wind speed.
|
import csv
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel
from scipy.interpolate import spline
def list_str_to_int(input):
str_hold = "".join(input)
return int(str_hold)
def time_series_avg_wind_speeds(curr_windset):
""" avg wind speed (per day) forecast plots for a single wind farm
"""
time_series = []
wind_speeds = []
prev = None
curr_date_speeds = []
for row in curr_windset:
if row[0] != 'date':
date = row[0]
wind_speed = row[5]
date_arr = list(date)
year = list_str_to_int(date_arr[0:4])
month = list_str_to_int(date_arr[4:6])
time_series_entry = datetime(year, month, 1)
if wind_speed != 'NA':
if (time_series_entry != prev) and (prev != None):
avg_wind_speed = np.mean(curr_date_speeds)
wind_speeds.append(avg_wind_speed)
time_series.append(time_series_entry)
curr_date_speeds = []
else:
curr_date_speeds.append(float(wind_speed))
# print curr_date_speeds
prev = time_series_entry
plt.plot(time_series, wind_speeds)
plt.savefig('plots/'+str(sys.argv[1] + '_avg.pdf'))
plt.show()
if __name__ == '__main__':
curr_windset = csv.reader(open(sys.argv[1], 'r'))
time_series_avg_wind_speeds(curr_windset)
|
<commit_before><commit_msg>Add initial matploblib average plots for wind speed.<commit_after>
|
import csv
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel
from scipy.interpolate import spline
def list_str_to_int(input):
str_hold = "".join(input)
return int(str_hold)
def time_series_avg_wind_speeds(curr_windset):
""" avg wind speed (per day) forecast plots for a single wind farm
"""
time_series = []
wind_speeds = []
prev = None
curr_date_speeds = []
for row in curr_windset:
if row[0] != 'date':
date = row[0]
wind_speed = row[5]
date_arr = list(date)
year = list_str_to_int(date_arr[0:4])
month = list_str_to_int(date_arr[4:6])
time_series_entry = datetime(year, month, 1)
if wind_speed != 'NA':
if (time_series_entry != prev) and (prev != None):
avg_wind_speed = np.mean(curr_date_speeds)
wind_speeds.append(avg_wind_speed)
time_series.append(time_series_entry)
curr_date_speeds = []
else:
curr_date_speeds.append(float(wind_speed))
# print curr_date_speeds
prev = time_series_entry
plt.plot(time_series, wind_speeds)
plt.savefig('plots/'+str(sys.argv[1] + '_avg.pdf'))
plt.show()
if __name__ == '__main__':
curr_windset = csv.reader(open(sys.argv[1], 'r'))
time_series_avg_wind_speeds(curr_windset)
|
Add initial matploblib average plots for wind speed.import csv
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel
from scipy.interpolate import spline
def list_str_to_int(input):
str_hold = "".join(input)
return int(str_hold)
def time_series_avg_wind_speeds(curr_windset):
""" avg wind speed (per day) forecast plots for a single wind farm
"""
time_series = []
wind_speeds = []
prev = None
curr_date_speeds = []
for row in curr_windset:
if row[0] != 'date':
date = row[0]
wind_speed = row[5]
date_arr = list(date)
year = list_str_to_int(date_arr[0:4])
month = list_str_to_int(date_arr[4:6])
time_series_entry = datetime(year, month, 1)
if wind_speed != 'NA':
if (time_series_entry != prev) and (prev != None):
avg_wind_speed = np.mean(curr_date_speeds)
wind_speeds.append(avg_wind_speed)
time_series.append(time_series_entry)
curr_date_speeds = []
else:
curr_date_speeds.append(float(wind_speed))
# print curr_date_speeds
prev = time_series_entry
plt.plot(time_series, wind_speeds)
plt.savefig('plots/'+str(sys.argv[1] + '_avg.pdf'))
plt.show()
if __name__ == '__main__':
curr_windset = csv.reader(open(sys.argv[1], 'r'))
time_series_avg_wind_speeds(curr_windset)
|
<commit_before><commit_msg>Add initial matploblib average plots for wind speed.<commit_after>import csv
import matplotlib.pyplot as plt
from datetime import datetime
import sys
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, Panel
from scipy.interpolate import spline
def list_str_to_int(input):
str_hold = "".join(input)
return int(str_hold)
def time_series_avg_wind_speeds(curr_windset):
""" avg wind speed (per day) forecast plots for a single wind farm
"""
time_series = []
wind_speeds = []
prev = None
curr_date_speeds = []
for row in curr_windset:
if row[0] != 'date':
date = row[0]
wind_speed = row[5]
date_arr = list(date)
year = list_str_to_int(date_arr[0:4])
month = list_str_to_int(date_arr[4:6])
time_series_entry = datetime(year, month, 1)
if wind_speed != 'NA':
if (time_series_entry != prev) and (prev != None):
avg_wind_speed = np.mean(curr_date_speeds)
wind_speeds.append(avg_wind_speed)
time_series.append(time_series_entry)
curr_date_speeds = []
else:
curr_date_speeds.append(float(wind_speed))
# print curr_date_speeds
prev = time_series_entry
plt.plot(time_series, wind_speeds)
plt.savefig('plots/'+str(sys.argv[1] + '_avg.pdf'))
plt.show()
if __name__ == '__main__':
curr_windset = csv.reader(open(sys.argv[1], 'r'))
time_series_avg_wind_speeds(curr_windset)
|
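The loop above groups forecasts by month (every timestamp is collapsed to datetime(year, month, 1)) and averages the speeds by hand; with the pandas import that is already present, the same monthly means can be computed in a few lines. This sketch assumes the CSV exposes 'date' and 'wind_speed' columns by name, which the positional row[0]/row[5] indexing does not guarantee:

import pandas as pd

def monthly_mean_wind_speeds(path):
    df = pd.read_csv(path, na_values=['NA'])
    # collapse YYYYMMDD... timestamps to the first day of their month
    month = pd.to_datetime(df['date'].astype(str).str[:6], format='%Y%m')
    return df.groupby(month)['wind_speed'].mean()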
|
1b4962c62e9fad96fa1282823bd3adac4030abb4
|
ny_to_chi_test.py
|
ny_to_chi_test.py
|
from matplotlib import pyplot as plt
from greengraph import Greengraph
mygraph=Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
Include test to see if the package can be imported and used.
|
Include test to see if the package can be imported and used.
|
Python
|
apache-2.0
|
paulsbrookes/greengraph
|
Include test to see if the package can be imported and used.
|
from matplotlib import pyplot as plt
from greengraph import Greengraph
mygraph=Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
<commit_before><commit_msg>Include test to see if the package can be imported and used.<commit_after>
|
from matplotlib import pyplot as plt
from greengraph import Greengraph
mygraph=Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
Include test to see if the package can be imported and used.from matplotlib import pyplot as plt
from greengraph import Greengraph
mygraph=Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
<commit_before><commit_msg>Include test to see if the package can be imported and used.<commit_after>from matplotlib import pyplot as plt
from greengraph import Greengraph
mygraph=Greengraph('New York','Chicago')
data = mygraph.green_between(20)
plt.plot(data)
plt.show()
|
|
52d4e1e3b962963de9c17c12106bdf957434a62e
|
subsample_signals.py
|
subsample_signals.py
|
"""
Subsample the signal files to multiply the number of
training samples.
Just indicate the input and output directory.
Use python 3 (but should be working with python 2)
"""
import os, sys
import random
import numpy as np
import utils
# Set directories
root = os.getcwd()
dirInSignals = root + '/../Data/Test_mesh_01/signals/'
dirOutSamples = root + '/../Data/Test_mesh_01/samples/'
def main():
# Global check
assert(os.path.exists(dirInSignals))
assert(os.path.exists(dirOutSamples))
# For each mesh
signalsFilesList = utils.sortFiles(os.listdir(dirInSignals))
for signalFilename in signalsFilesList:
# Load signal
print('Subsample ', signalFilename)
idSignal = signalFilename.split('.')[0] # Little hack to get the id
completeSignal = utils.loadLabelList(dirInSignals + signalFilename)
# For each signal, we generate multiple samples
for i in range(500): # TODO: Tune this variable (dynamically depend on the signal?)
decimatedSignal = np.copy(completeSignal)
for j in range(len(completeSignal)): # Iterate over every entry of the signal
if completeSignal[j] == 1: # Candidate for subsampling
if random.randrange(2) == 0: # 50% chance of removal
decimatedSignal[j] = 0 # Subsample
utils.saveLabelList(decimatedSignal, dirOutSamples + idSignal + '_' + str(i) + '.txt') # Save
if __name__ == "__main__":
main()
|
Add script to subsample signals
|
Add script to subsample signals
|
Python
|
apache-2.0
|
Conchylicultor/DeepLearningOnGraph,Conchylicultor/DeepLearningOnGraph,Conchylicultor/DeepLearningOnGraph
|
Add script to subsample signals
|
"""
Subsample the signal files to multiply the number of
training samples.
Just indicate the input and output directory.
Use python 3 (but should be working with python 2)
"""
import os, sys
import random
import numpy as np
import utils
# Set directories
root = os.getcwd()
dirInSignals = root + '/../Data/Test_mesh_01/signals/'
dirOutSamples = root + '/../Data/Test_mesh_01/samples/'
def main():
# Global check
assert(os.path.exists(dirInSignals))
assert(os.path.exists(dirOutSamples))
# For each mesh
signalsFilesList = utils.sortFiles(os.listdir(dirInSignals))
for signalFilename in signalsFilesList:
# Load signal
print('Subsample ', signalFilename)
idSignal = signalFilename.split('.')[0] # Little hack to get the id
completeSignal = utils.loadLabelList(dirInSignals + signalFilename)
# For each signal, we generate multiple samples
for i in range(500): # TODO: Tune this variable (dynamically depend on the signal?)
decimatedSignal = np.copy(completeSignal)
for j in range(len(completeSignal)): # Iterate over every entry of the signal
if completeSignal[j] == 1: # Candidate for subsampling
if random.randrange(2) == 0: # 50% chance of removal
decimatedSignal[j] = 0 # Subsample
utils.saveLabelList(decimatedSignal, dirOutSamples + idSignal + '_' + str(i) + '.txt') # Save
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to subsample signals<commit_after>
|
"""
Subsample the signal files to multiply the number of
training samples.
Just indicate the input and output directory.
Use python 3 (but should be working with python 2)
"""
import os, sys
import random
import numpy as np
import utils
# Set directories
root = os.getcwd()
dirInSignals = root + '/../Data/Test_mesh_01/signals/'
dirOutSamples = root + '/../Data/Test_mesh_01/samples/'
def main():
# Global check
assert(os.path.exists(dirInSignals))
assert(os.path.exists(dirOutSamples))
# For each mesh
signalsFilesList = utils.sortFiles(os.listdir(dirInSignals))
for signalFilename in signalsFilesList:
# Load signal
print('Subsample ', signalFilename)
idSignal = signalFilename.split('.')[0] # Little hack to get the id
completeSignal = utils.loadLabelList(dirInSignals + signalFilename)
# For each signal, we generate multiple samples
for i in range(500): # TODO: Tune this variable (dynamically depend on the signal?)
decimatedSignal = np.copy(completeSignal)
for j in range(len(completeSignal)): # Iterate over every entry of the signal
if completeSignal[j] == 1: # Candidate for subsampling
if random.randrange(2) == 0: # 50% chance of removal
decimatedSignal[j] = 0 # Subsample
utils.saveLabelList(decimatedSignal, dirOutSamples + idSignal + '_' + str(i) + '.txt') # Save
if __name__ == "__main__":
main()
|
Add script to subsample signals"""
Subsample the signal files to multiply the number of
training samples.
Just indicate the input and output directory.
Use python 3 (but should be working with python 2)
"""
import os, sys
import random
import numpy as np
import utils
# Set directories
root = os.getcwd()
dirInSignals = root + '/../Data/Test_mesh_01/signals/'
dirOutSamples = root + '/../Data/Test_mesh_01/samples/'
def main():
# Global check
assert(os.path.exists(dirInSignals))
assert(os.path.exists(dirOutSamples))
# For each mesh
signalsFilesList = utils.sortFiles(os.listdir(dirInSignals))
for signalFilename in signalsFilesList:
# Load signal
print('Subsample ', signalFilename)
idSignal = signalFilename.split('.')[0] # Little hack to get the id
completeSignal = utils.loadLabelList(dirInSignals + signalFilename)
# For each signal, we generate multiple samples
for i in range(500): # TODO: Tune this variable (dynamically depend on the signal?)
decimatedSignal = np.copy(completeSignal)
for j in range(len(completeSignal)): # Iterate over every entry of the signal
if completeSignal[j] == 1: # Candidate for subsampling
if random.randrange(2) == 0: # 50% chance of removal
decimatedSignal[j] = 0 # Subsample
utils.saveLabelList(decimatedSignal, dirOutSamples + idSignal + '_' + str(i) + '.txt') # Save
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add script to subsample signals<commit_after>"""
Subsample the signal files to multiply the number of
training samples.
Just indicate the input and output directory.
Use python 3 (but should be working with python 2)
"""
import os, sys
import random
import numpy as np
import utils
# Set directories
root = os.getcwd()
dirInSignals = root + '/../Data/Test_mesh_01/signals/'
dirOutSamples = root + '/../Data/Test_mesh_01/samples/'
def main():
# Global check
assert(os.path.exists(dirInSignals))
assert(os.path.exists(dirOutSamples))
# For each mesh
signalsFilesList = utils.sortFiles(os.listdir(dirInSignals))
for signalFilename in signalsFilesList:
# Load signal
print('Subsample ', signalFilename)
idSignal = signalFilename.split('.')[0] # Little hack to get the id
completeSignal = utils.loadLabelList(dirInSignals + signalFilename)
# For each signal, we generate multiple samples
for i in range(500): # TODO: Tune this variable (dynamically depend on the signal?)
decimatedSignal = np.copy(completeSignal)
for j in range(len(completeSignal)): # Iterate over every entry of the signal
if completeSignal[j] == 1: # Candidate for subsampling
if random.randrange(2) == 0: # 50% chance of removal
decimatedSignal[j] = 0 # Subsample
utils.saveLabelList(decimatedSignal, dirOutSamples + idSignal + '_' + str(i) + '.txt') # Save
if __name__ == "__main__":
main()
|
|
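The nested loop flips each positive label to zero with probability one half; numpy can draw the whole mask at once, which keeps the 500-sample generation fast on large meshes. A sketch of that vectorised variant (subsample_signal is not a function of this repository):

import numpy as np

def subsample_signal(signal, p_keep=0.5, rng=None):
    if rng is None:
        rng = np.random.default_rng()
    signal = np.asarray(signal)
    # drop each positive entry with probability (1 - p_keep)
    drop = (signal == 1) & (rng.random(signal.shape) >= p_keep)
    return np.where(drop, 0, signal)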
58a9c449c59767129fe75f6efecb44eb3fa6f3e4
|
Graphs/depthFirstSearch.py
|
Graphs/depthFirstSearch.py
|
#!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Depth First Search to find shortest path lecture code
import graphs
def printPath(path):
"""Assumes path is a list of nodes"""
result = ''
for i in range(len(path)):
result += str(path[i])
if i != len(path) - 1:
result += '->'
return result
def DFS(graph, start, end, path, shortest, toPrint=False):
"""Assumes graph is a Digraph: start and end are nodes; path and shortest
are lists of nodes.
Returns a shortest path from start to end in graph"""
path = path + [start]
if toPrint:
print('Current DFS path:', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest is None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath is not None:
shortest = newPath
elif toPrint:
print('Already visited', node)
return shortest
def BFS(graph, start, end, toPrint=False):
initPath = [start]
pathQueue = [initPath]
if toPrint:
print('Current BFS path: {}'.format(printPath(pathQueue)))
while len(pathQueue) != 0:
# Get and remove oldest element in pathQueue
tmpPath = pathQueue.pop(0)
print('Current BFS path: {}'.format(printPath(tmpPath)))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
pathQueue.append(newPath)
return None
def shortestPath(graph, start, end, toPrint=False):
return DFS(graph, start, end, [], None, toPrint)
def testSP(source, destination):
g = graphs.buildCityGraph(graphs.Digraph)
sp = shortestPath(g, g.getNode(source), g.getNode(destination),
toPrint=True)
if sp is not None:
print('Shortest path from {} to {} is {}'
.format(source, destination, printPath(sp)))
else:
print('There is no path from {} to {}'.format(source, destination))
def main():
test_shortest_path = False
if test_shortest_path:
testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')
if __name__ == '__main__':
main()
|
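DFS as written has to enumerate candidate paths and keep the shortest one seen so far, while BFS explores paths in order of length, so in an unweighted graph the first complete path it dequeues is already a shortest one. A tiny self-contained illustration on a plain adjacency dict (the graph literal is invented, not the module's city graph):

from collections import deque

def bfs_shortest(adj, start, end):
    queue = deque([[start]])
    seen = {start}
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node == end:
            return path  # first hit is shortest: paths come off the queue by length
        for nxt in adj.get(node, []):
            if nxt not in seen:
                seen.add(nxt)
                queue.append(path + [nxt])
    return None

print(bfs_shortest({'A': ['B', 'C'], 'B': ['D'], 'C': ['D'], 'D': []}, 'A', 'D'))
# -> ['A', 'B', 'D']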
Add depth first search for graphs
|
Add depth first search for graphs
|
Python
|
mit
|
HKuz/Test_Code
|
Add depth first search for graphs
|
#!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Depth First Search to find shortest path lecture code
import graphs
def printPath(path):
"""Assumes path is a list of nodes"""
result = ''
for i in range(len(path)):
result += str(path[i])
if i != len(path) - 1:
result += '->'
return result
def DFS(graph, start, end, path, shortest, toPrint=False):
"""Assumes graph is a Digraph: start and end are nodes; path and shortest
are lists of nodes.
Returns a shortest path from start to end in graph"""
path = path + [start]
if toPrint:
print('Current DFS path:', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest is None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath is not None:
shortest = newPath
elif toPrint:
print('Already visited', node)
return shortest
def BFS(graph, start, end, toPrint=False):
initPath = [start]
pathQueue = [initPath]
if toPrint:
print('Current BFS path: {}'.format(printPath(pathQueue)))
while len(pathQueue) != 0:
# Get and remove oldest element in pathQueue
tmpPath = pathQueue.pop(0)
print('Current BFS path: {}'.format(printPath(tmpPath)))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
pathQueue.append(newPath)
return None
def shortestPath(graph, start, end, toPrint=False):
return DFS(graph, start, end, [], None, toPrint)
def testSP(source, destination):
g = graphs.buildCityGraph(graphs.Digraph)
sp = shortestPath(g, g.getNode(source), g.getNode(destination),
toPrint=True)
if sp is not None:
print('Shortest path from {} to {} is {}'
.format(source, destination, printPath(sp)))
else:
print('There is no path from {} to {}'.format(source, destination))
def main():
test_shortest_path = False
if test_shortest_path:
testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add depth first search for graphs<commit_after>
|
#!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Depth First Search to find shortest path lecture code
import graphs
def printPath(path):
"""Assumes path is a list of nodes"""
result = ''
for i in range(len(path)):
result += str(path[i])
if i != len(path) - 1:
result += '->'
return result
def DFS(graph, start, end, path, shortest, toPrint=False):
"""Assumes graph is a Digraph: start and end are nodes; path and shortest
are lists of nodes.
Returns a shortest path from start to end in graph"""
path = path + [start]
if toPrint:
print('Current DFS path:', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest is None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath is not None:
shortest = newPath
elif toPrint:
print('Already visited', node)
return shortest
def BFS(graph, start, end, toPrint=False):
initPath = [start]
pathQueue = [initPath]
if toPrint:
print('Current BFS path: {}'.format(printPath(pathQueue)))
while len(pathQueue) != 0:
# Get and remove oldest element in pathQueue
tmpPath = pathQueue.pop(0)
print('Current BFS path: {}'.format(printPath(tmpPath)))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
pathQueue.append(newPath)
return None
def shortestPath(graph, start, end, toPrint=False):
return DFS(graph, start, end, [], None, toPrint)
def testSP(source, destination):
g = graphs.buildCityGraph(graphs.Digraph)
sp = shortestPath(g, g.getNode(source), g.getNode(destination),
toPrint=True)
if sp is not None:
print('Shortest path from {} to {} is {}'
.format(source, destination, printPath(sp)))
else:
print('There is no path from {} to {}'.format(source, destination))
def main():
test_shortest_path = False
if test_shortest_path:
testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')
if __name__ == '__main__':
main()
|
Add depth first search for graphs#!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Depth First Search to find shortest path lecture code
import graphs
def printPath(path):
"""Assumes path is a list of nodes"""
result = ''
for i in range(len(path)):
result += str(path[i])
if i != len(path) - 1:
result += '->'
return result
def DFS(graph, start, end, path, shortest, toPrint=False):
"""Assumes graph is a Digraph: start and end are nodes; path and shortest
are lists of nodes.
Returns a shortest path from start to end in graph"""
path = path + [start]
if toPrint:
print('Current DFS path:', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest is None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath is not None:
shortest = newPath
elif toPrint:
print('Already visited', node)
return shortest
def BFS(graph, start, end, toPrint=False):
initPath = [start]
pathQueue = [initPath]
if toPrint:
print('Current BFS path: {}'.format(printPath(initPath)))
while len(pathQueue) != 0:
# Get and remove oldest element in pathQueue
tmpPath = pathQueue.pop(0)
print('Current BFS path: {}'.format(printPath(tmpPath)))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
pathQueue.append(newPath)
return None
def shortestPath(graph, start, end, toPrint=False):
return DFS(graph, start, end, [], None, toPrint)
def testSP(source, destination):
g = graphs.buildCityGraph(graphs.Digraph)
sp = shortestPath(g, g.getNode(source), g.getNode(destination),
toPrint=True)
if sp is not None:
print('Shortest path from {} to {} is {}'
.format(source, destination, printPath(sp)))
else:
print('There is no path from {} to {}'.format(source, destination))
def main():
test_shortest_path = False
if test_shortest_path:
testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add depth first search for graphs<commit_after>#!/usr/local/bin/python
# edX Intro to Computational Thinking and Data Science
# Graphs - Depth First Search to find shortest path lecture code
import graphs
def printPath(path):
"""Assumes path is a list of nodes"""
result = ''
for i in range(len(path)):
result += str(path[i])
if i != len(path) - 1:
result += '->'
return result
def DFS(graph, start, end, path, shortest, toPrint=False):
"""Assumes graph is a Digraph: start and end are nodes; path and shortest
are lists of nodes.
Returns a shortest path from start to end in graph"""
path = path + [start]
if toPrint:
print('Current DFS path:', printPath(path))
if start == end:
return path
for node in graph.childrenOf(start):
if node not in path: # avoid cycles
if shortest is None or len(path) < len(shortest):
newPath = DFS(graph, node, end, path, shortest, toPrint)
if newPath is not None:
shortest = newPath
elif toPrint:
print('Already visited', node)
return shortest
def BFS(graph, start, end, toPrint=False):
initPath = [start]
pathQueue = [initPath]
if toPrint:
print('Current BFS path: {}'.format(printPath(initPath)))
while len(pathQueue) != 0:
# Get and remove oldest element in pathQueue
tmpPath = pathQueue.pop(0)
print('Current BFS path: {}'.format(printPath(tmpPath)))
lastNode = tmpPath[-1]
if lastNode == end:
return tmpPath
for nextNode in graph.childrenOf(lastNode):
if nextNode not in tmpPath:
newPath = tmpPath + [nextNode]
pathQueue.append(newPath)
return None
def shortestPath(graph, start, end, toPrint=False):
return DFS(graph, start, end, [], None, toPrint)
def testSP(source, destination):
g = graphs.buildCityGraph(graphs.Digraph)
sp = shortestPath(g, g.getNode(source), g.getNode(destination),
toPrint=True)
if sp is not None:
print('Shortest path from {} to {} is {}'
.format(source, destination, printPath(sp)))
else:
print('There is no path from {} to {}'.format(source, destination))
def main():
test_shortest_path = False
if test_shortest_path:
testSP('Chicago', 'Boston')
testSP('Boston', 'Phoenix')
if __name__ == '__main__':
main()
|
|
c568189313f96af68fdca93ffc65b528e3964e06
|
src/tests/test_api_users.py
|
src/tests/test_api_users.py
|
#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""Users API unit tests."""
__author__ = 'adamjmcgrath@gmail.com (Adam McGrath)'
import unittest
from google.appengine.ext import ndb
import base
import helpers
class ApiTestCase(base.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.testbed.init_search_stub()
ndb.put_multi([
helpers.user(username='foo'),
helpers.user(username='bar'),
helpers.user(username='baz'),
])
def testUserSearch(self):
response = self.get_json('/api/users/foo')
self.assertEqual(len(response), 1)
self.assertEqual(response[0]['username'], 'foo')
response = self.get_json('/api/users/ba')
self.assertEqual(len(response), 2)
if __name__ == '__main__':
unittest.main()
|
Add tests for users api
|
Add tests for users api
|
Python
|
mpl-2.0
|
adamjmcgrath/fridayfilmclub,adamjmcgrath/fridayfilmclub,adamjmcgrath/fridayfilmclub,adamjmcgrath/fridayfilmclub
|
Add tests for users api
|
#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""Users API unit tests."""
__author__ = 'adamjmcgrath@gmail.com (Adam McGrath)'
import unittest
from google.appengine.ext import ndb
import base
import helpers
class ApiTestCase(base.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.testbed.init_search_stub()
ndb.put_multi([
helpers.user(username='foo'),
helpers.user(username='bar'),
helpers.user(username='baz'),
])
def testUserSearch(self):
response = self.get_json('/api/users/foo')
self.assertEqual(len(response), 1)
self.assertEqual(response[0]['username'], 'foo')
response = self.get_json('/api/users/ba')
self.assertEqual(len(response), 2)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for users api<commit_after>
|
#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""Users API unit tests."""
__author__ = 'adamjmcgrath@gmail.com (Adam McGrath)'
import unittest
from google.appengine.ext import ndb
import base
import helpers
class ApiTestCase(base.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.testbed.init_search_stub()
ndb.put_multi([
helpers.user(username='foo'),
helpers.user(username='bar'),
helpers.user(username='baz'),
])
def testUserSearch(self):
response = self.get_json('/api/users/foo')
self.assertEqual(len(response), 1)
self.assertEqual(response[0]['username'], 'foo')
response = self.get_json('/api/users/ba')
self.assertEqual(len(response), 2)
if __name__ == '__main__':
unittest.main()
|
Add tests for users api#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""Users API unit tests."""
__author__ = 'adamjmcgrath@gmail.com (Adam McGrath)'
import unittest
from google.appengine.ext import ndb
import base
import helpers
class ApiTestCase(base.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.testbed.init_search_stub()
ndb.put_multi([
helpers.user(username='foo'),
helpers.user(username='bar'),
helpers.user(username='baz'),
])
def testUserSearch(self):
response = self.get_json('/api/users/foo')
self.assertEqual(len(response), 1)
self.assertEqual(response[0]['username'], 'foo')
response = self.get_json('/api/users/ba')
self.assertEqual(len(response), 2)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for users api<commit_after>#!/usr/bin/python
#
# Copyright Friday Film Club. All Rights Reserved.
"""Users API unit tests."""
__author__ = 'adamjmcgrath@gmail.com (Adam McGrath)'
import unittest
from google.appengine.ext import ndb
import base
import helpers
class ApiTestCase(base.TestCase):
def setUp(self):
super(ApiTestCase, self).setUp()
self.testbed.init_search_stub()
ndb.put_multi([
helpers.user(username='foo'),
helpers.user(username='bar'),
helpers.user(username='baz'),
])
def testUserSearch(self):
response = self.get_json('/api/users/foo')
self.assertEqual(len(response), 1)
self.assertEqual(response[0]['username'], 'foo')
response = self.get_json('/api/users/ba')
self.assertEqual(len(response), 2)
if __name__ == '__main__':
unittest.main()
|
|
ec14651411d3489e85cabc323bb6fa90eeb7041a
|
third_party/gpus/compress_find_cuda_config.py
|
third_party/gpus/compress_find_cuda_config.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py.oss', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64.oss', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of 'find_cuda.py'.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda.py.gz.base64', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
Remove .oss from find_cuda_config in compression script.
|
Remove .oss from find_cuda_config in compression script.
See https://github.com/tensorflow/tensorflow/pull/40759
PiperOrigin-RevId: 318452377
Change-Id: I04f3ad1c8cf9cac5446d0a1196ebbf66660bf312
|
Python
|
apache-2.0
|
freedomtan/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,cxxgtxy/tensorflow,annarev/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,aam-at/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,petewarden/tensorflow,aam-at/tensorflow,davidzchen/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,Intel-Corporation/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,annarev/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,davidzchen/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,davidzchen/tensorflow,aldian/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,cxxgtxy/tensorflow,yongtang/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,sarvex/tensorflow,Intel-Corporation/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,aam-at/tensorflow,paolodedios/tensorflow,sarvex/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,cxxgtxy/tensorflow,davidzchen/tensorflow,annarev/tensorflow,freedomtan/tensorflow,aldian/tensorflow,karllessard/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,petewarden/tensorflow,aldian/tensorflow,cxxgtxy/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,aldian/tensorflow,freedomtan/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,davidzchen/tensorflow,aam-at/tensorflow,davidzchen/tensorflow,cxxgtxy/tensorflow,annarev/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,freedomtan/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,aam-at/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,gautam1858/tensorflow,petewarden/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,petewarden/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,annarev/tensorflow,Intel-tensorflow/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,aam-at/tensorflow,tensorflow/tensorflow,annarev/tensorflow,petewarden/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,aam-at/tensorflow,karllessard/tensorflow,davidzchen/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,annarev/tensorflow,Intel-Corporation/tensorflow,petewarden/tensorflow,yongtang/tensorflow,freedomtan/tensorflow,petewarden/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,aldian/tensorflow,aam-at/tensorflow,annarev/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,aldian/tensorflow,aldian/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,aldian/tensorflow,tensorflow/tensorflow-pywrap_saved_model,freedomtan/tensorflow
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py.oss', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64.oss', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
Remove .oss from find_cuda_config in compression script.
See https://github.com/tensorflow/tensorflow/pull/40759
PiperOrigin-RevId: 318452377
Change-Id: I04f3ad1c8cf9cac5446d0a1196ebbf66660bf312
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of 'find_cuda.py'.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda.py.gz.base64', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
<commit_before># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py.oss', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64.oss', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
<commit_msg>Remove .oss from find_cuda_config in compression script.
See https://github.com/tensorflow/tensorflow/pull/40759
PiperOrigin-RevId: 318452377
Change-Id: I04f3ad1c8cf9cac5446d0a1196ebbf66660bf312<commit_after>
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of 'find_cuda.py'.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda.py.gz.base64', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py.oss', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64.oss', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
Remove .oss from find_cuda_config in compression script.
See https://github.com/tensorflow/tensorflow/pull/40759
PiperOrigin-RevId: 318452377
Change-Id: I04f3ad1c8cf9cac5446d0a1196ebbf66660bf312# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of 'find_cuda.py'.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda.py.gz.base64', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
<commit_before># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of find_cuda_config.py.oss.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda_config.py.oss', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda_config.py.gz.base64.oss', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
<commit_msg>Remove .oss from find_cuda_config in compression script.
See https://github.com/tensorflow/tensorflow/pull/40759
PiperOrigin-RevId: 318452377
Change-Id: I04f3ad1c8cf9cac5446d0a1196ebbf66660bf312<commit_after># Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compresses the contents of 'find_cuda.py'.
The compressed file is what is actually being used. It works around remote
config not being able to upload files yet.
"""
import base64
import zlib
def main():
with open('find_cuda.py', 'rb') as f:
data = f.read()
compressed = zlib.compress(data)
b64encoded = base64.b64encode(compressed)
with open('find_cuda.py.gz.base64', 'wb') as f:
f.write(b64encoded)
if __name__ == '__main__':
main()
|
4cb361b8d6402392c5b4922f2f6793eb38c82c8e
|
test/RS485/readFromRS485.py
|
test/RS485/readFromRS485.py
|
#!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
|
Add an example in Python for read from RS485
|
Add an example in Python for read from RS485
|
Python
|
cc0-1.0
|
hardelettrosoft/project2,hardelettrosoft/project2,giovannimanzoni/project2,giovannimanzoni/project2,hardelettrosoft/project2,giovannimanzoni/project2
|
Add an example in Python for read from RS485
|
#!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
|
<commit_before><commit_msg>Add an example in Python for read from RS485<commit_after>
|
#!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
|
Add an example in Python for read from RS485#!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
|
<commit_before><commit_msg>Add an example in Python for read from RS485<commit_after>#!/usr/bin/env python
#
# Python sample application that reads from the raspicomm's RS-485 Port
#
# Thanks to Acmesystems, program edited by Giovanni Manzoni @ HardElettroSoft
#
# 9600 8N1 flow control Xon/Xoff
#
import array
import serial
maxReadCount=10
readBuffer = array.array('c')
print('this sample application reads from the rs-485 port')
# open the port
print('opening device /dev/ttys1')
try:
ser = serial.Serial(port='/dev/ttyS1', baudrate=9600) # or ttyS2
except:
print('failed.')
print('possible causes:')
print('1) the raspicomm device driver is not loaded. type \'lsmod\' and verify that you \'raspicommrs485\' is loaded.')
print('2) the raspicomm device driver is in use. Is another application using the device driver?')
print('3) something went wrong when loading the device driver. type \'dmesg\' and check the kernel messages')
exit()
print('successful.')
# read in a loop
print('start reading from the rs-485 port a maximum of ' + str(maxReadCount) + ' bytes')
readCount=0
i=0
while readCount < maxReadCount:
readBuffer.append(ser.read(1))
readCount=readCount+1
# print the received bytes
print('we received the following bytes:')
val=ord(readBuffer[i])
hx=''
if val >= 32 and val <= 126:
hx=' - \'{0}\''.format(readBuffer[i])
print('[{0:d}]: 0x{1:x}{2}'.format(i, val, hx))
i=i+1
|
|
d09b36eb6aec1f7ffa20113495c24989f46709e5
|
tests/test_pooling.py
|
tests/test_pooling.py
|
import Queue
import pylibmc
from nose.tools import eq_, ok_
from tests import PylibmcTestCase
class PoolTestCase(PylibmcTestCase):
pass
class ClientPoolTests(PoolTestCase):
def test_simple(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc:
ok_(smc)
ok_(smc.set("a", 1))
eq_(smc["a"], 1)
def test_exhaust(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc1:
with p.reserve() as smc2:
self.assertRaises(Queue.Empty, p.reserve().__enter__)
# TODO Thread-mapped pool tests
|
Add unit tests for pooling
|
Add unit tests for pooling
|
Python
|
bsd-3-clause
|
lericson/pylibmc,lericson/pylibmc,lericson/pylibmc
|
Add unit tests for pooling
|
import Queue
import pylibmc
from nose.tools import eq_, ok_
from tests import PylibmcTestCase
class PoolTestCase(PylibmcTestCase):
pass
class ClientPoolTests(PoolTestCase):
def test_simple(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc:
ok_(smc)
ok_(smc.set("a", 1))
eq_(smc["a"], 1)
def test_exhaust(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc1:
with p.reserve() as smc2:
self.assertRaises(Queue.Empty, p.reserve().__enter__)
# TODO Thread-mapped pool tests
|
<commit_before><commit_msg>Add unit tests for pooling<commit_after>
|
import Queue
import pylibmc
from nose.tools import eq_, ok_
from tests import PylibmcTestCase
class PoolTestCase(PylibmcTestCase):
pass
class ClientPoolTests(PoolTestCase):
def test_simple(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc:
ok_(smc)
ok_(smc.set("a", 1))
eq_(smc["a"], 1)
def test_exhaust(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc1:
with p.reserve() as smc2:
self.assertRaises(Queue.Empty, p.reserve().__enter__)
# TODO Thread-mapped pool tests
|
Add unit tests for poolingimport Queue
import pylibmc
from nose.tools import eq_, ok_
from tests import PylibmcTestCase
class PoolTestCase(PylibmcTestCase):
pass
class ClientPoolTests(PoolTestCase):
def test_simple(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc:
ok_(smc)
ok_(smc.set("a", 1))
eq_(smc["a"], 1)
def test_exhaust(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc1:
with p.reserve() as smc2:
self.assertRaises(Queue.Empty, p.reserve().__enter__)
# TODO Thread-mapped pool tests
|
<commit_before><commit_msg>Add unit tests for pooling<commit_after>import Queue
import pylibmc
from nose.tools import eq_, ok_
from tests import PylibmcTestCase
class PoolTestCase(PylibmcTestCase):
pass
class ClientPoolTests(PoolTestCase):
def test_simple(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc:
ok_(smc)
ok_(smc.set("a", 1))
eq_(smc["a"], 1)
def test_exhaust(self):
p = pylibmc.ClientPool(self.mc, 2)
with p.reserve() as smc1:
with p.reserve() as smc2:
self.assertRaises(Queue.Empty, p.reserve().__enter__)
# TODO Thread-mapped pool tests
|
|
d48975e826dd7adac508f28618a06439a8cf50f4
|
queries-limits.py
|
queries-limits.py
|
#!/usr/bin/env python
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.tester import clean_aws_resources
class GrowClusterTest(ClusterTester):
"""
Test scylla cluster growth (adding nodes after an initial cluster size).
:avocado: enable
"""
@clean_aws_resources
def setUp(self):
self.credentials = None
self.db_cluster = None
self.loaders = None
# we will give a very slow disk to the db node
# so the loader node will easilly saturate it
bdm = [{"DeviceName": "/dev/sda1",
"Ebs": {"Iops": 100,
"VolumeType": "io1",
"DeleteOnTermination": True}}]
# Use big instance to be not throttled by the network
self.init_resources(n_db_nodes=1, n_loader_nodes=1,
dbs_block_device_mappings=bdm,
dbs_type='m4.4xlarge',
loaders_type='m4.4xlarge')
self.loaders.wait_for_init()
self.db_cluster.wait_for_init()
self.stress_thread = None
self.payload = "/tmp/payload"
self.db_cluster.run("grep -v SCYLLA_ARGS /etc/sysconfig/scylla-server > /tmp/l")
self.db_cluster.run("""echo "SCYLLA_ARGS=\"-m 128M -c 1\"" >> /tmp/l""")
self.db_cluster.run("sudo cp /tmp/l /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo chown root.root /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo systemctl stop scylla-server.service")
self.db_cluster.run("sudo systemctl start scylla-server.service")
self.loaders.run("sudo dnf install -y boost-program-options")
self.loaders.run("sudo dnf install -y libuv")
self.loaders.send_file("queries-limits", self.payload)
self.loaders.run("chmod +x " + self.payload)
def test_connexion_limits(self):
ips = self.db_cluster.get_node_private_ips()
params = " --servers %s --duration 600 --queries 1000000" % (ips[0])
self.run_stress(stress_cmd=(self.payload + params), duration=10)
if __name__ == '__main__':
main()
|
Add the queries limits test.
|
limits: Add the queries limits test.
Use a C++ payload to check if the queries limitation
works.
Fixes #180.
Signed-off-by: Benoît Canet <ecd1f14f7c1c6dc7a40210bdcc3810e0107ecbc8@scylladb.com>
|
Python
|
agpl-3.0
|
scylladb/scylla-cluster-tests,scylladb/scylla-cluster-tests,scylladb/scylla-longevity-tests,scylladb/scylla-cluster-tests,scylladb/scylla-cluster-tests,amoskong/scylla-cluster-tests,amoskong/scylla-cluster-tests,scylladb/scylla-longevity-tests,scylladb/scylla-cluster-tests,amoskong/scylla-cluster-tests,amoskong/scylla-cluster-tests,scylladb/scylla-longevity-tests,amoskong/scylla-cluster-tests
|
limits: Add the queries limits test.
Use a C++ payload to check if the queries limitation
works.
Fixes #180.
Signed-off-by: Benoît Canet <ecd1f14f7c1c6dc7a40210bdcc3810e0107ecbc8@scylladb.com>
|
#!/usr/bin/env python
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.tester import clean_aws_resources
class GrowClusterTest(ClusterTester):
"""
Test scylla cluster growth (adding nodes after an initial cluster size).
:avocado: enable
"""
@clean_aws_resources
def setUp(self):
self.credentials = None
self.db_cluster = None
self.loaders = None
# we will give a very slow disk to the db node
# so the loader node will easilly saturate it
bdm = [{"DeviceName": "/dev/sda1",
"Ebs": {"Iops": 100,
"VolumeType": "io1",
"DeleteOnTermination": True}}]
# Use big instance to be not throttled by the network
self.init_resources(n_db_nodes=1, n_loader_nodes=1,
dbs_block_device_mappings=bdm,
dbs_type='m4.4xlarge',
loaders_type='m4.4xlarge')
self.loaders.wait_for_init()
self.db_cluster.wait_for_init()
self.stress_thread = None
self.payload = "/tmp/payload"
self.db_cluster.run("grep -v SCYLLA_ARGS /etc/sysconfig/scylla-server > /tmp/l")
self.db_cluster.run("""echo "SCYLLA_ARGS=\"-m 128M -c 1\"" >> /tmp/l""")
self.db_cluster.run("sudo cp /tmp/l /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo chown root.root /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo systemctl stop scylla-server.service")
self.db_cluster.run("sudo systemctl start scylla-server.service")
self.loaders.run("sudo dnf install -y boost-program-options")
self.loaders.run("sudo dnf install -y libuv")
self.loaders.send_file("queries-limits", self.payload)
self.loaders.run("chmod +x " + self.payload)
def test_connexion_limits(self):
ips = self.db_cluster.get_node_private_ips()
params = " --servers %s --duration 600 --queries 1000000" % (ips[0])
self.run_stress(stress_cmd=(self.payload + params), duration=10)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>limits: Add the queries limits test.
Use a C++ payload to check if the queries limitation
works.
Fixes #180.
Signed-off-by: Benoît Canet <ecd1f14f7c1c6dc7a40210bdcc3810e0107ecbc8@scylladb.com><commit_after>
|
#!/usr/bin/env python
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.tester import clean_aws_resources
class GrowClusterTest(ClusterTester):
"""
Test scylla cluster growth (adding nodes after an initial cluster size).
:avocado: enable
"""
@clean_aws_resources
def setUp(self):
self.credentials = None
self.db_cluster = None
self.loaders = None
# we will give a very slow disk to the db node
# so the loader node will easilly saturate it
bdm = [{"DeviceName": "/dev/sda1",
"Ebs": {"Iops": 100,
"VolumeType": "io1",
"DeleteOnTermination": True}}]
# Use big instance to be not throttled by the network
self.init_resources(n_db_nodes=1, n_loader_nodes=1,
dbs_block_device_mappings=bdm,
dbs_type='m4.4xlarge',
loaders_type='m4.4xlarge')
self.loaders.wait_for_init()
self.db_cluster.wait_for_init()
self.stress_thread = None
self.payload = "/tmp/payload"
self.db_cluster.run("grep -v SCYLLA_ARGS /etc/sysconfig/scylla-server > /tmp/l")
self.db_cluster.run("""echo "SCYLLA_ARGS=\"-m 128M -c 1\"" >> /tmp/l""")
self.db_cluster.run("sudo cp /tmp/l /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo chown root.root /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo systemctl stop scylla-server.service")
self.db_cluster.run("sudo systemctl start scylla-server.service")
self.loaders.run("sudo dnf install -y boost-program-options")
self.loaders.run("sudo dnf install -y libuv")
self.loaders.send_file("queries-limits", self.payload)
self.loaders.run("chmod +x " + self.payload)
def test_connexion_limits(self):
ips = self.db_cluster.get_node_private_ips()
params = " --servers %s --duration 600 --queries 1000000" % (ips[0])
self.run_stress(stress_cmd=(self.payload + params), duration=10)
if __name__ == '__main__':
main()
|
limits: Add the queries limits test.
Use a C++ payload to check if the queries limitation
works.
Fixes #180.
Signed-off-by: Benoît Canet <ecd1f14f7c1c6dc7a40210bdcc3810e0107ecbc8@scylladb.com>#!/usr/bin/env python
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.tester import clean_aws_resources
class GrowClusterTest(ClusterTester):
"""
Test scylla cluster growth (adding nodes after an initial cluster size).
:avocado: enable
"""
@clean_aws_resources
def setUp(self):
self.credentials = None
self.db_cluster = None
self.loaders = None
# we will give a very slow disk to the db node
# so the loader node will easilly saturate it
bdm = [{"DeviceName": "/dev/sda1",
"Ebs": {"Iops": 100,
"VolumeType": "io1",
"DeleteOnTermination": True}}]
# Use big instance to be not throttled by the network
self.init_resources(n_db_nodes=1, n_loader_nodes=1,
dbs_block_device_mappings=bdm,
dbs_type='m4.4xlarge',
loaders_type='m4.4xlarge')
self.loaders.wait_for_init()
self.db_cluster.wait_for_init()
self.stress_thread = None
self.payload = "/tmp/payload"
self.db_cluster.run("grep -v SCYLLA_ARGS /etc/sysconfig/scylla-server > /tmp/l")
self.db_cluster.run("""echo "SCYLLA_ARGS=\"-m 128M -c 1\"" >> /tmp/l""")
self.db_cluster.run("sudo cp /tmp/l /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo chown root.root /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo systemctl stop scylla-server.service")
self.db_cluster.run("sudo systemctl start scylla-server.service")
self.loaders.run("sudo dnf install -y boost-program-options")
self.loaders.run("sudo dnf install -y libuv")
self.loaders.send_file("queries-limits", self.payload)
self.loaders.run("chmod +x " + self.payload)
def test_connexion_limits(self):
ips = self.db_cluster.get_node_private_ips()
params = " --servers %s --duration 600 --queries 1000000" % (ips[0])
self.run_stress(stress_cmd=(self.payload + params), duration=10)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>limits: Add the queries limits test.
Use a C++ payload to check if the queries limitation
works.
Fixes #180.
Signed-off-by: Benoît Canet <ecd1f14f7c1c6dc7a40210bdcc3810e0107ecbc8@scylladb.com><commit_after>#!/usr/bin/env python
from avocado import main
from sdcm.tester import ClusterTester
from sdcm.tester import clean_aws_resources
class GrowClusterTest(ClusterTester):
"""
Test scylla cluster growth (adding nodes after an initial cluster size).
:avocado: enable
"""
@clean_aws_resources
def setUp(self):
self.credentials = None
self.db_cluster = None
self.loaders = None
# we will give a very slow disk to the db node
# so the loader node will easilly saturate it
bdm = [{"DeviceName": "/dev/sda1",
"Ebs": {"Iops": 100,
"VolumeType": "io1",
"DeleteOnTermination": True}}]
# Use big instance to be not throttled by the network
self.init_resources(n_db_nodes=1, n_loader_nodes=1,
dbs_block_device_mappings=bdm,
dbs_type='m4.4xlarge',
loaders_type='m4.4xlarge')
self.loaders.wait_for_init()
self.db_cluster.wait_for_init()
self.stress_thread = None
self.payload = "/tmp/payload"
self.db_cluster.run("grep -v SCYLLA_ARGS /etc/sysconfig/scylla-server > /tmp/l")
self.db_cluster.run("""echo "SCYLLA_ARGS=\"-m 128M -c 1\"" >> /tmp/l""")
self.db_cluster.run("sudo cp /tmp/l /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo chown root.root /etc/sysconfig/scylla-server")
self.db_cluster.run("sudo systemctl stop scylla-server.service")
self.db_cluster.run("sudo systemctl start scylla-server.service")
self.loaders.run("sudo dnf install -y boost-program-options")
self.loaders.run("sudo dnf install -y libuv")
self.loaders.send_file("queries-limits", self.payload)
self.loaders.run("chmod +x " + self.payload)
def test_connexion_limits(self):
ips = self.db_cluster.get_node_private_ips()
params = " --servers %s --duration 600 --queries 1000000" % (ips[0])
self.run_stress(stress_cmd=(self.payload + params), duration=10)
if __name__ == '__main__':
main()
|
|
b8ea919b0b7f7f4b9cb2ddf548f0e674f73e5411
|
tests/forcing_single_position/heat_flux.py
|
tests/forcing_single_position/heat_flux.py
|
import os
import sys
import vtktools
import math
import numpy
from numpy import finfo
def flux(file,x,y):
u=vtktools.vtu(file)
flux = u.GetScalarField('HeatFlux')
pos = u.GetLocations()
f = finfo(float)
for i in range(0,len(flux)):
if( abs(pos[i,0] - x) < f.eps and abs(pos[i,1] - y) < f.eps and (pos[i,2] - 0.0) < f.eps ):
return flux[i]
return -666
|
Add the one that got away
|
Add the one that got away
|
Python
|
lgpl-2.1
|
iakovos-panourgias/fluidity,jrper/fluidity,jrper/fluidity,iakovos-panourgias/fluidity,jrper/fluidity,jjo31/ATHAM-Fluidity,rjferrier/fluidity,rjferrier/fluidity,iakovos-panourgias/fluidity,jjo31/ATHAM-Fluidity,iakovos-panourgias/fluidity,rjferrier/fluidity,jrper/fluidity,jjo31/ATHAM-Fluidity,jrper/fluidity,jjo31/ATHAM-Fluidity,rjferrier/fluidity,rjferrier/fluidity,jjo31/ATHAM-Fluidity,iakovos-panourgias/fluidity
|
Add the one that got away
|
import os
import sys
import vtktools
import math
import numpy
from numpy import finfo
def flux(file,x,y):
u=vtktools.vtu(file)
flux = u.GetScalarField('HeatFlux')
pos = u.GetLocations()
f = finfo(float)
for i in range(0,len(flux)):
if( abs(pos[i,0] - x) < f.eps and abs(pos[i,1] - y) < f.eps and (pos[i,2] - 0.0) < f.eps ):
return flux[i]
return -666
|
<commit_before><commit_msg>Add the one that got away<commit_after>
|
import os
import sys
import vtktools
import math
import numpy
from numpy import finfo
def flux(file,x,y):
u=vtktools.vtu(file)
flux = u.GetScalarField('HeatFlux')
pos = u.GetLocations()
f = finfo(float)
for i in range(0,len(flux)):
if( abs(pos[i,0] - x) < f.eps and abs(pos[i,1] - y) < f.eps and (pos[i,2] - 0.0) < f.eps ):
return flux[i]
return -666
|
Add the one that got awayimport os
import sys
import vtktools
import math
import numpy
from numpy import finfo
def flux(file,x,y):
u=vtktools.vtu(file)
flux = u.GetScalarField('HeatFlux')
pos = u.GetLocations()
f = finfo(float)
for i in range(0,len(flux)):
if( abs(pos[i,0] - x) < f.eps and abs(pos[i,1] - y) < f.eps and (pos[i,2] - 0.0) < f.eps ):
return flux[i]
return -666
|
<commit_before><commit_msg>Add the one that got away<commit_after>import os
import sys
import vtktools
import math
import numpy
from numpy import finfo
def flux(file,x,y):
u=vtktools.vtu(file)
flux = u.GetScalarField('HeatFlux')
pos = u.GetLocations()
f = finfo(float)
for i in range(0,len(flux)):
if( abs(pos[i,0] - x) < f.eps and abs(pos[i,1] - y) < f.eps and (pos[i,2] - 0.0) < f.eps ):
return flux[i]
return -666
|
|
a93f81b18262b1e29d11bd101691162b2b5face3
|
MozillaPage1.py
|
MozillaPage1.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.find_element_by_class_name("header--search-togle").click()
driver.implicitly_wait(2)
driver.find_element_by_id("search-q").send_keys("Hello")
driver.find_element_by_id("search-q").send_keys(Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
|
Test Mozilla Marketplace with simple python
|
Test Mozilla Marketplace with simple python
Search Text "Hello"
|
Python
|
mit
|
bishnucit/Python-Preludes
|
Test Mozilla Marketplace with simple python
Search Text "Hello"
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.find_element_by_class_name("header--search-togle").click()
driver.implicitly_wait(2)
driver.find_element_by_id("search-q").send_keys("Hello")
driver.find_element_by_id("search-q").send_keys(Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
|
<commit_before><commit_msg>Test Mozilla Marketplace with simple python
Search Text "Hello"<commit_after>
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.find_element_by_class_name("header--search-togle").click()
driver.implicitly_wait(2)
driver.find_element_by_id("search-q").send_keys("Hello")
driver.find_element_by_id("search-q").send_keys(Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
|
Test Mozilla Marketplace with simple python
Search Text "Hello"from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.find_element_by_class_name("header--search-togle").click()
driver.implicitly_wait(2)
driver.find_element_by_id("search-q").send_keys("Hello")
driver.find_element_by_id("search-q").send_keys(Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
|
<commit_before><commit_msg>Test Mozilla Marketplace with simple python
Search Text "Hello"<commit_after>from selenium import webdriver
from selenium.webdriver.common.keys import Keys
driver = webdriver.Firefox()
driver.get("https://marketplace-dev.allizom.org/")
driver.find_element_by_class_name("header--search-togle").click()
driver.implicitly_wait(2)
driver.find_element_by_id("search-q").send_keys("Hello")
driver.find_element_by_id("search-q").send_keys(Keys.RETURN)
assert "Hello | Firefox Marketplace" in driver.title
driver.close()
|
|
988a55aa42e04c57dc58b04d631c30a899bba664
|
bench_runtime.py
|
bench_runtime.py
|
import time
import math
import matplotlib.pyplot as plt
from dgim.dgim import Dgim
from dgim.utils import generate_random_stream
def measure_update_time(N, iterations):
dgim = Dgim(N)
# initialization
for elt in generate_random_stream(N):
dgim.update(elt)
time_start = time.time()
bucket_count = 0
for elt in generate_random_stream(iterations):
dgim.update(elt)
bucket_count += len(dgim.buckets)
time_stop = time.time()
return time_stop - time_start, bucket_count/float(iterations)
def run_update_benchmark():
times = []
bucket_counts = []
for i in range(24):
time, bucket_count = measure_update_time(2 ** i, iterations=100000)
print 2 ** i, time
times.append((2 ** i, time))
bucket_counts.append(bucket_count)
plt.plot([n for n, time in times], [time for n, time in times])
#plt.plot(bucket_counts, [time for n, time in times])
plt.show()
if __name__ == "__main__":
run_update_benchmark()
|
Add dummy script to benchmark operation times()
|
Add dummy script to benchmark operation times()
|
Python
|
bsd-3-clause
|
simondolle/dgim,simondolle/dgim
|
Add dummy script to benchmark operation times()
|
import time
import math
import matplotlib.pyplot as plt
from dgim.dgim import Dgim
from dgim.utils import generate_random_stream
def measure_update_time(N, iterations):
dgim = Dgim(N)
# initialization
for elt in generate_random_stream(N):
dgim.update(elt)
time_start = time.time()
bucket_count = 0
for elt in generate_random_stream(iterations):
dgim.update(elt)
bucket_count += len(dgim.buckets)
time_stop = time.time()
return time_stop - time_start, bucket_count/float(iterations)
def run_update_benchmark():
times = []
bucket_counts = []
for i in range(24):
time, bucket_count = measure_update_time(2 ** i, iterations=100000)
print 2 ** i, time
times.append((2 ** i, time))
bucket_counts.append(bucket_count)
plt.plot([n for n, time in times], [time for n, time in times])
#plt.plot(bucket_counts, [time for n, time in times])
plt.show()
if __name__ == "__main__":
run_update_benchmark()
|
<commit_before><commit_msg>Add dummy script to benchmark operation times()<commit_after>
|
import time
import math
import matplotlib.pyplot as plt
from dgim.dgim import Dgim
from dgim.utils import generate_random_stream
def measure_update_time(N, iterations):
dgim = Dgim(N)
# initialization
for elt in generate_random_stream(N):
dgim.update(elt)
time_start = time.time()
bucket_count = 0
for elt in generate_random_stream(iterations):
dgim.update(elt)
bucket_count += len(dgim.buckets)
time_stop = time.time()
return time_stop - time_start, bucket_count/float(iterations)
def run_update_benchmark():
times = []
bucket_counts = []
for i in range(24):
time, bucket_count = measure_update_time(2 ** i, iterations=100000)
print 2 ** i, time
times.append((2 ** i, time))
bucket_counts.append(bucket_count)
plt.plot([n for n, time in times], [time for n, time in times])
#plt.plot(bucket_counts, [time for n, time in times])
plt.show()
if __name__ == "__main__":
run_update_benchmark()
|
Add dummy script to benchmark operation times()import time
import math
import matplotlib.pyplot as plt
from dgim.dgim import Dgim
from dgim.utils import generate_random_stream
def measure_update_time(N, iterations):
dgim = Dgim(N)
# initialization
for elt in generate_random_stream(N):
dgim.update(elt)
time_start = time.time()
bucket_count = 0
for elt in generate_random_stream(iterations):
dgim.update(elt)
bucket_count += len(dgim.buckets)
time_stop = time.time()
return time_stop - time_start, bucket_count/float(iterations)
def run_update_benchmark():
times = []
bucket_counts = []
for i in range(24):
time, bucket_count = measure_update_time(2 ** i, iterations=100000)
print 2 ** i, time
times.append((2 ** i, time))
bucket_counts.append(bucket_count)
plt.plot([n for n, time in times], [time for n, time in times])
#plt.plot(bucket_counts, [time for n, time in times])
plt.show()
if __name__ == "__main__":
run_update_benchmark()
|
<commit_before><commit_msg>Add dummy script to benchmark operation times()<commit_after>import time
import math
import matplotlib.pyplot as plt
from dgim.dgim import Dgim
from dgim.utils import generate_random_stream
def measure_update_time(N, iterations):
dgim = Dgim(N)
# initialization
for elt in generate_random_stream(N):
dgim.update(elt)
time_start = time.time()
bucket_count = 0
for elt in generate_random_stream(iterations):
dgim.update(elt)
bucket_count += len(dgim.buckets)
time_stop = time.time()
return time_stop - time_start, bucket_count/float(iterations)
def run_update_benchmark():
times = []
bucket_counts = []
for i in range(24):
time, bucket_count = measure_update_time(2 ** i, iterations=100000)
print 2 ** i, time
times.append((2 ** i, time))
bucket_counts.append(bucket_count)
plt.plot([n for n, time in times], [time for n, time in times])
#plt.plot(bucket_counts, [time for n, time in times])
plt.show()
if __name__ == "__main__":
run_update_benchmark()
|
|
fd85068b56d4a01bd5ade5773ae1299f0ac1b5e8
|
test/unit/test_sorted_set.py
|
test/unit/test_sorted_set.py
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
|
Add initial unit tests for SortedSet.
|
Add initial unit tests for SortedSet.
|
Python
|
apache-2.0
|
4degrees/clique
|
Add initial unit tests for SortedSet.
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
|
<commit_before><commit_msg>Add initial unit tests for SortedSet.<commit_after>
|
# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
|
Add initial unit tests for SortedSet.# :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
|
<commit_before><commit_msg>Add initial unit tests for SortedSet.<commit_after># :coding: utf-8
# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips
# :license: See LICENSE.txt.
import pytest
from clique.sorted_set import SortedSet
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('item', 'expected'), [
(1, True),
(10, False)
], ids=[
'item present',
'item not present'
])
def test_contains(item, expected, standard_set):
'''Check item membership.'''
assert (item in standard_set) is expected
@pytest.mark.parametrize(('sorted_set', 'expected'), [
(SortedSet(), 0),
(SortedSet([]), 0),
(SortedSet([1]), 1),
(SortedSet([1, 2, 3]), 3),
(SortedSet([1, 1, 2, 2, 3, 3]), 4)
], ids=[
'no iterable',
'empty iterable',
'single item',
'multiple items',
'duplicate multiple items'
])
def test_len(sorted_set, expected):
'''Calculate set length.'''
assert len(sorted_set) == expected
@pytest.fixture
def standard_set(request):
'''Return sorted set.'''
return SortedSet([4, 5, 6, 7, 2, 1, 1])
@pytest.mark.parametrize(('sorted_set', 'item', 'expected'), [
(SortedSet(), 1, 1),
(SortedSet([1]), 1, 1),
(SortedSet([1]), 2, 2)
], ids=[
'item',
'existing item',
'new item'
])
def test_add(sorted_set, item, expected):
'''Add item.'''
sorted_set.add(item)
assert item in sorted_set
assert len(sorted_set) == expected
@pytest.mark.parametrize(('sorted_set', 'item'), [
(SortedSet([1]), 1),
(SortedSet(), 1)
], ids=[
'present item',
'missing item'
])
def test_discard(sorted_set, item):
'''Discard item.'''
sorted_set.discard(item)
assert item not in sorted_set
|
|
cf439f01f8370971799182abc0e0c635037d2b2f
|
tests/test_helpers.py
|
tests/test_helpers.py
|
from sanic.helpers import has_message_body
def test_has_message_body():
tests = (
(100, False),
(102, False),
(204, False),
(200, True),
(304, False),
(400, True),
)
for status_code, expected in tests:
assert has_message_body(status_code) is expected
|
Add test for has_message_body helper function.
|
Add test for has_message_body helper function.
|
Python
|
mit
|
yunstanford/sanic,lixxu/sanic,yunstanford/sanic,channelcat/sanic,lixxu/sanic,channelcat/sanic,lixxu/sanic,ashleysommer/sanic,yunstanford/sanic,yunstanford/sanic,channelcat/sanic,ashleysommer/sanic,ashleysommer/sanic,lixxu/sanic,channelcat/sanic
|
Add test for has_message_body helper function.
|
from sanic.helpers import has_message_body
def test_has_message_body():
tests = (
(100, False),
(102, False),
(204, False),
(200, True),
(304, False),
(400, True),
)
for status_code, expected in tests:
assert has_message_body(status_code) is expected
|
<commit_before><commit_msg>Add test for has_message_body helper function.<commit_after>
|
from sanic.helpers import has_message_body
def test_has_message_body():
tests = (
(100, False),
(102, False),
(204, False),
(200, True),
(304, False),
(400, True),
)
for status_code, expected in tests:
assert has_message_body(status_code) is expected
|
Add test for has_message_body helper function.from sanic.helpers import has_message_body
def test_has_message_body():
tests = (
(100, False),
(102, False),
(204, False),
(200, True),
(304, False),
(400, True),
)
for status_code, expected in tests:
assert has_message_body(status_code) is expected
|
<commit_before><commit_msg>Add test for has_message_body helper function.<commit_after>from sanic.helpers import has_message_body
def test_has_message_body():
tests = (
(100, False),
(102, False),
(204, False),
(200, True),
(304, False),
(400, True),
)
for status_code, expected in tests:
assert has_message_body(status_code) is expected
|
|
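The parametrized cases in the record above follow the usual HTTP rule that 1xx, 204 and 304 responses carry no message body. A standalone sketch of a helper with exactly that behaviour is shown below; the real sanic.helpers implementation may differ in detail.
def has_message_body(status):
    """Return False for status codes that carry no body (1xx, 204, 304)."""
    return status >= 200 and status not in (204, 304)
assert has_message_body(200) is True
assert has_message_body(400) is True
assert has_message_body(100) is False
assert has_message_body(204) is False
assert has_message_body(304) is False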
21b25852f7b1b9457c5d233c9b5ef14d2a33a9a5
|
src/test_client.py
|
src/test_client.py
|
#!/usr/bin/python
import traffic
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser()
parser.add_argument("--connect", type=str, help="hostname:port")
parser.add_argument("--interval", type=int, help="summary interval")
parser.add_argument("clients", type=str, nargs="+", metavar="C", help="Clients to get summary for")
if __name__ == "__main__":
args = parser.parse_args()
end = datetime.now()
start = end - timedelta(hours=args.interval)
with traffic.Connection("tcp://" + args.connect) as c:
summary = traffic.get_summary(c, start, end, args.clients)
for entry in summary.data:
print entry.address, entry.sum_traffic_in, entry.sum_traffic_out
|
Add a script for simple summary query testing
|
Add a script for simple summary query testing
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
Python
|
bsd-3-clause
|
agdsn/traffic-service-client,agdsn/traffic-service-client
|
Add a script for simple summary query testing
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>
|
#!/usr/bin/python
import traffic
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser()
parser.add_argument("--connect", type=str, help="hostname:port")
parser.add_argument("--interval", type=int, help="summary interval")
parser.add_argument("clients", type=str, nargs="+", metavar="C", help="Clients to get summary for")
if __name__ == "__main__":
args = parser.parse_args()
end = datetime.now()
start = end - timedelta(hours=args.interval)
with traffic.Connection("tcp://" + args.connect) as c:
summary = traffic.get_summary(c, start, end, args.clients)
for entry in summary.data:
print entry.address, entry.sum_traffic_in, entry.sum_traffic_out
|
<commit_before><commit_msg>Add a script for simple summary query testing
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>
|
#!/usr/bin/python
import traffic
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser()
parser.add_argument("--connect", type=str, help="hostname:port")
parser.add_argument("--interval", type=int, help="summary interval")
parser.add_argument("clients", type=str, nargs="+", metavar="C", help="Clients to get summary for")
if __name__ == "__main__":
args = parser.parse_args()
end = datetime.now()
start = end - timedelta(hours=args.interval)
with traffic.Connection("tcp://" + args.connect) as c:
summary = traffic.get_summary(c, start, end, args.clients)
for entry in summary.data:
print entry.address, entry.sum_traffic_in, entry.sum_traffic_out
|
Add a script for simple summary query testing
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de>#!/usr/bin/python
import traffic
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser()
parser.add_argument("--connect", type=str, help="hostname:port")
parser.add_argument("--interval", type=int, help="summary interval")
parser.add_argument("clients", type=str, nargs="+", metavar="C", help="Clients to get summary for")
if __name__ == "__main__":
args = parser.parse_args()
end = datetime.now()
start = end - timedelta(hours=args.interval)
with traffic.Connection("tcp://" + args.connect) as c:
summary = traffic.get_summary(c, start, end, args.clients)
for entry in summary.data:
print entry.address, entry.sum_traffic_in, entry.sum_traffic_out
|
<commit_before><commit_msg>Add a script for simple summary query testing
Signed-off-by: Jan Losinski <577c4104c61edf9f052c616c0c23e67bef4a9955@wh2.tu-dresden.de><commit_after>#!/usr/bin/python
import traffic
import argparse
from datetime import datetime, timedelta
parser = argparse.ArgumentParser()
parser.add_argument("--connect", type=str, help="hostname:port")
parser.add_argument("--interval", type=int, help="summary interval")
parser.add_argument("clients", type=str, nargs="+", metavar="C", help="Clients to get summary for")
if __name__ == "__main__":
args = parser.parse_args()
end = datetime.now()
start = end - timedelta(hours=args.interval)
with traffic.Connection("tcp://" + args.connect) as c:
summary = traffic.get_summary(c, start, end, args.clients)
for entry in summary.data:
print entry.address, entry.sum_traffic_in, entry.sum_traffic_out
|
|
8c77b20a33917d7536e21574cc9a9e592f3f6ae7
|
test_saferedisqueue.py
|
test_saferedisqueue.py
|
from uuid import uuid1
import time
from saferedisqueue import SafeRedisQueue
def test_autocleanup():
queue = SafeRedisQueue(
name='saferedisqueue-test-%s' % uuid1().hex,
autoclean_interval=1)
queue.push('bad')
queue.push('good')
assert queue._redis.llen(queue.QUEUE_KEY) == 2
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 0
uid_bad, payload_bad = queue.pop()
# Pop triggered first autoclean run before popping. At that time the
# ackbuf was still empty, so nothing was moved to backup. But the
# backup lock was set, to delay the next autoclean run for
# autoclean_interval seconds.
assert queue._redis.llen(queue.QUEUE_KEY) == 1
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
uid_good, payload_good = queue.pop()
# Autoclean started but instantly aborted due to backup lock.
assert queue._redis.llen(queue.ACKBUF_KEY) == 2
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
queue.ack(uid_good) # done with that one
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Pop after an autoclean_interval triggers cleanup internally
time.sleep(1.2)
assert queue.pop(timeout=-1) == (None, None)
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 1
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Next pop triggers autoclean again; requeues; pops bad item again
time.sleep(1.2)
assert queue.pop(timeout=-1) == (uid_bad, payload_bad)
# After pop, queue is empty again, item waiting in ackbuf
assert queue._redis.llen(queue.ACKBUF_KEY) == 1
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
|
Add mini test suite. First for autoclean.
|
Add mini test suite. First for autoclean.
|
Python
|
bsd-3-clause
|
hellp/saferedisqueue
|
Add mini test suite. First for autoclean.
|
from uuid import uuid1
import time
from saferedisqueue import SafeRedisQueue
def test_autocleanup():
queue = SafeRedisQueue(
name='saferedisqueue-test-%s' % uuid1().hex,
autoclean_interval=1)
queue.push('bad')
queue.push('good')
assert queue._redis.llen(queue.QUEUE_KEY) == 2
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 0
uid_bad, payload_bad = queue.pop()
# Pop triggered first autoclean run before popping. At that time the
# ackbuf was still empty, so nothing was moved to backup. But the
# backup lock was set, to delay the next autoclean run for
# autoclean_interval seconds.
assert queue._redis.llen(queue.QUEUE_KEY) == 1
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
uid_good, payload_good = queue.pop()
# Autoclean started but instantly aborted due to backup lock.
assert queue._redis.llen(queue.ACKBUF_KEY) == 2
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
queue.ack(uid_good) # done with that one
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Pop after an autoclean_interval triggers cleanup internally
time.sleep(1.2)
assert queue.pop(timeout=-1) == (None, None)
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 1
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Next pop triggers autoclean again; requeues; pops bad item again
time.sleep(1.2)
assert queue.pop(timeout=-1) == (uid_bad, payload_bad)
# After pop, queue is empty again, item waiting in ackbuf
assert queue._redis.llen(queue.ACKBUF_KEY) == 1
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
|
<commit_before><commit_msg>Add mini test suite. First for autoclean.<commit_after>
|
from uuid import uuid1
import time
from saferedisqueue import SafeRedisQueue
def test_autocleanup():
queue = SafeRedisQueue(
name='saferedisqueue-test-%s' % uuid1().hex,
autoclean_interval=1)
queue.push('bad')
queue.push('good')
assert queue._redis.llen(queue.QUEUE_KEY) == 2
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 0
uid_bad, payload_bad = queue.pop()
# Pop triggered first autoclean run before popping. At that time the
# ackbuf was still empty, so nothing was moved to backup. But the
# backup lock was set, to delay the next autoclean run for
# autoclean_interval seconds.
assert queue._redis.llen(queue.QUEUE_KEY) == 1
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
uid_good, payload_good = queue.pop()
# Autoclean started but instantly aborted due to backup lock.
assert queue._redis.llen(queue.ACKBUF_KEY) == 2
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
queue.ack(uid_good) # done with that one
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Pop after an autoclean_interval triggers cleanup internally
time.sleep(1.2)
assert queue.pop(timeout=-1) == (None, None)
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 1
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Next pop triggers autoclean again; requeues; pops bad item again
time.sleep(1.2)
assert queue.pop(timeout=-1) == (uid_bad, payload_bad)
# After pop, queue is empty again, item waiting in ackbuf
assert queue._redis.llen(queue.ACKBUF_KEY) == 1
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
|
Add mini test suite. First for autoclean.
from uuid import uuid1
import time
from saferedisqueue import SafeRedisQueue
def test_autocleanup():
queue = SafeRedisQueue(
name='saferedisqueue-test-%s' % uuid1().hex,
autoclean_interval=1)
queue.push('bad')
queue.push('good')
assert queue._redis.llen(queue.QUEUE_KEY) == 2
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 0
uid_bad, payload_bad = queue.pop()
# Pop triggered first autoclean run before popping. At that time the
# ackbuf was still empty, so nothing was moved to backup. But the
# backup lock was set, to delay the next autoclean run for
# autoclean_interval seconds.
assert queue._redis.llen(queue.QUEUE_KEY) == 1
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
uid_good, payload_good = queue.pop()
# Autoclean started but instantly aborted due to backup lock.
assert queue._redis.llen(queue.ACKBUF_KEY) == 2
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
queue.ack(uid_good) # done with that one
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Pop after an autoclean_interval triggers cleanup internally
time.sleep(1.2)
assert queue.pop(timeout=-1) == (None, None)
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 1
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Next pop triggers autoclean again; requeues; pops bad item again
time.sleep(1.2)
assert queue.pop(timeout=-1) == (uid_bad, payload_bad)
# After pop, queue is empty again, item waiting in ackbuf
assert queue._redis.llen(queue.ACKBUF_KEY) == 1
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
|
<commit_before><commit_msg>Add mini test suite. First for autoclean.<commit_after>
from uuid import uuid1
import time
from saferedisqueue import SafeRedisQueue
def test_autocleanup():
queue = SafeRedisQueue(
name='saferedisqueue-test-%s' % uuid1().hex,
autoclean_interval=1)
queue.push('bad')
queue.push('good')
assert queue._redis.llen(queue.QUEUE_KEY) == 2
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 0
uid_bad, payload_bad = queue.pop()
# Pop triggered first autoclean run before popping. At that time the
# ackbuf was still empty, so nothing was moved to backup. But the
# backup lock was set, to delay the next autoclean run for
# autoclean_interval seconds.
assert queue._redis.llen(queue.QUEUE_KEY) == 1
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
uid_good, payload_good = queue.pop()
# Autoclean started but instantly aborted due to backup lock.
assert queue._redis.llen(queue.ACKBUF_KEY) == 2
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
queue.ack(uid_good) # done with that one
assert queue._redis.llen(queue.ACKBUF_KEY) == 1 # bad item
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Pop after an autoclean_interval triggers cleanup internally
time.sleep(1.2)
assert queue.pop(timeout=-1) == (None, None)
assert queue._redis.llen(queue.ACKBUF_KEY) == 0
assert queue._redis.llen(queue.BACKUP) == 1
assert queue._redis.llen(queue.QUEUE_KEY) == 0
    # Next pop triggers autoclean again; requeues; pops bad item again
time.sleep(1.2)
assert queue.pop(timeout=-1) == (uid_bad, payload_bad)
# After pop, queue is empty again, item waiting in ackbuf
assert queue._redis.llen(queue.ACKBUF_KEY) == 1
assert queue._redis.llen(queue.BACKUP) == 0
assert queue._redis.llen(queue.QUEUE_KEY) == 0
|
|
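For context, the behaviour the assertions above depend on can be sketched with plain redis-py calls: pop moves an item from the queue list into an ack buffer, an autoclean pass requeues the backup list and then parks unacknowledged items there, and a volatile lock key limits that pass to once per autoclean_interval. All names and signatures below are illustrative assumptions, not the saferedisqueue implementation (which also tracks items by uid).
import redis
class TinySafeQueue(object):
    def __init__(self, name, autoclean_interval=60):
        self._redis = redis.StrictRedis()
        self.QUEUE_KEY = '%s:queue' % name
        self.ACKBUF_KEY = '%s:ackbuf' % name
        self.BACKUP = '%s:backup' % name
        self.BACKUP_LOCK = '%s:backuplock' % name
        self.autoclean_interval = autoclean_interval
    def push(self, item):
        self._redis.lpush(self.QUEUE_KEY, item)
    def _autoclean(self):
        # The lock key doubles as a timer: SET NX EX succeeds at most once
        # per autoclean_interval seconds.
        if self._redis.set(self.BACKUP_LOCK, 1, nx=True, ex=self.autoclean_interval):
            # Requeue whatever was parked during the previous interval...
            while self._redis.rpoplpush(self.BACKUP, self.QUEUE_KEY):
                pass
            # ...then park the currently unacknowledged items.
            while self._redis.rpoplpush(self.ACKBUF_KEY, self.BACKUP):
                pass
    def pop(self):
        self._autoclean()
        return self._redis.rpoplpush(self.QUEUE_KEY, self.ACKBUF_KEY)
    def ack(self, item):
        # redis-py 3.x argument order: (name, count, value).
        self._redis.lrem(self.ACKBUF_KEY, 0, item)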
3cabac8174f9616b3a3c44b7e014d4b716e8873e
|
etalage/scripts/retrieve_piwik_custom_vars_and_export_to_csv.py
|
etalage/scripts/retrieve_piwik_custom_vars_and_export_to_csv.py
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Retrieve Piwik custom vars
# By: Sébastien Chauvel <schauvel@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import csv
import datetime
import urllib
import urllib2
import json
import getpass
import operator
BASE_URL = u'https://webstats.easter-eggs.com/index.php'
params = {
'module': 'API',
'method': 'CustomVariables.getCustomVariables',
'format': 'JSON',
'idSite': '20',
'period': 'month',
'date': datetime.date.today().isoformat(),
'expanded': '1',
'filter_limit': '100'
}
CUSTOM_VARS_URL = '{}?{}'.format(BASE_URL, urllib.urlencode(params))
print repr(CUSTOM_VARS_URL)
username = raw_input('username: ')
password = getpass.getpass('password: ')
basic_auth = base64.encodestring('{0}:{1}'.format(username, password)).strip()
request = urllib2.Request(CUSTOM_VARS_URL)
request.method = 'POST'
request.add_header('Authorization', 'Basic {0}'.format(basic_auth))
response = urllib2.urlopen(request)
json_custom_vars = json.loads(response.read())
get_urls = operator.itemgetter('label', 'sum_daily_nb_uniq_visitors', 'nb_visits')
infos = map(get_urls, json_custom_vars[0]['subtable'])
f = open('custom_vars_report.csv', 'wb')
wr = csv.writer(f, quoting=csv.QUOTE_ALL)
for info in infos:
wr.writerow(info)
|
Add a script to retrieve Piwik custom variables and export some pieces of information to CSV
|
Add a script to retrieve Piwik custom variables and export some pieces of information to CSV
|
Python
|
agpl-3.0
|
Gentux/etalage,Gentux/etalage,Gentux/etalage
|
Add a script to retrieve Piwik custom variables and export some pieces of information to CSV
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Retrieve Piwik custom vars
# By: Sébastien Chauvel <schauvel@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import csv
import datetime
import urllib
import urllib2
import json
import getpass
import operator
BASE_URL = u'https://webstats.easter-eggs.com/index.php'
params = {
'module': 'API',
'method': 'CustomVariables.getCustomVariables',
'format': 'JSON',
'idSite': '20',
'period': 'month',
'date': datetime.date.today().isoformat(),
'expanded': '1',
'filter_limit': '100'
}
CUSTOM_VARS_URL = '{}?{}'.format(BASE_URL, urllib.urlencode(params))
print repr(CUSTOM_VARS_URL)
username = raw_input('username: ')
password = getpass.getpass('password: ')
basic_auth = base64.encodestring('{0}:{1}'.format(username, password)).strip()
request = urllib2.Request(CUSTOM_VARS_URL)
request.method = 'POST'
request.add_header('Authorization', 'Basic {0}'.format(basic_auth))
response = urllib2.urlopen(request)
json_custom_vars = json.loads(response.read())
get_urls = operator.itemgetter('label', 'sum_daily_nb_uniq_visitors', 'nb_visits')
infos = map(get_urls, json_custom_vars[0]['subtable'])
f = open('custom_vars_report.csv', 'wb')
wr = csv.writer(f, quoting=csv.QUOTE_ALL)
for info in infos:
wr.writerow(info)
|
<commit_before><commit_msg>Add a script to retrieve Piwik custom variables and export some pieces of information to CSV<commit_after>
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Retrieve Piwik custom vars
# By: Sébastien Chauvel <schauvel@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import csv
import datetime
import urllib
import urllib2
import json
import getpass
import operator
BASE_URL = u'https://webstats.easter-eggs.com/index.php'
params = {
'module': 'API',
'method': 'CustomVariables.getCustomVariables',
'format': 'JSON',
'idSite': '20',
'period': 'month',
'date': datetime.date.today().isoformat(),
'expanded': '1',
'filter_limit': '100'
}
CUSTOM_VARS_URL = '{}?{}'.format(BASE_URL, urllib.urlencode(params))
print repr(CUSTOM_VARS_URL)
username = raw_input('username: ')
password = getpass.getpass('password: ')
basic_auth = base64.encodestring('{0}:{1}'.format(username, password)).strip()
request = urllib2.Request(CUSTOM_VARS_URL)
request.method = 'POST'
request.add_header('Authorization', 'Basic {0}'.format(basic_auth))
response = urllib2.urlopen(request)
json_custom_vars = json.loads(response.read())
get_urls = operator.itemgetter('label', 'sum_daily_nb_uniq_visitors', 'nb_visits')
infos = map(get_urls, json_custom_vars[0]['subtable'])
f = open('custom_vars_report.csv', 'wb')
wr = csv.writer(f, quoting=csv.QUOTE_ALL)
for info in infos:
wr.writerow(info)
|
Add a script to retrieve Piwik custom variables and export some pieces of information to CSV#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Retrieve Piwik custom vars
# By: Sébastien Chauvel <schauvel@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import csv
import datetime
import urllib
import urllib2
import json
import getpass
import operator
BASE_URL = u'https://webstats.easter-eggs.com/index.php'
params = {
'module': 'API',
'method': 'CustomVariables.getCustomVariables',
'format': 'JSON',
'idSite': '20',
'period': 'month',
'date': datetime.date.today().isoformat(),
'expanded': '1',
'filter_limit': '100'
}
CUSTOM_VARS_URL = '{}?{}'.format(BASE_URL, urllib.urlencode(params))
print repr(CUSTOM_VARS_URL)
username = raw_input('username: ')
password = getpass.getpass('password: ')
basic_auth = base64.encodestring('{0}:{1}'.format(username, password)).strip()
request = urllib2.Request(CUSTOM_VARS_URL)
request.method = 'POST'
request.add_header('Authorization', 'Basic {0}'.format(basic_auth))
response = urllib2.urlopen(request)
json_custom_vars = json.loads(response.read())
get_urls = operator.itemgetter('label', 'sum_daily_nb_uniq_visitors', 'nb_visits')
infos = map(get_urls, json_custom_vars[0]['subtable'])
f = open('custom_vars_report.csv', 'wb')
wr = csv.writer(f, quoting=csv.QUOTE_ALL)
for info in infos:
wr.writerow(info)
|
<commit_before><commit_msg>Add a script to retrieve Piwik custom variables and export some pieces of information to CSV<commit_after>#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Retrieve Piwik custom vars
# By: Sébastien Chauvel <schauvel@easter-eggs.com>
#
# Copyright (C) 2011, 2012 Easter-eggs
# http://gitorious.org/infos-pratiques/etalage
#
# This file is part of Etalage.
#
# Etalage is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Etalage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import base64
import csv
import datetime
import urllib
import urllib2
import json
import getpass
import operator
BASE_URL = u'https://webstats.easter-eggs.com/index.php'
params = {
'module': 'API',
'method': 'CustomVariables.getCustomVariables',
'format': 'JSON',
'idSite': '20',
'period': 'month',
'date': datetime.date.today().isoformat(),
'expanded': '1',
'filter_limit': '100'
}
CUSTOM_VARS_URL = '{}?{}'.format(BASE_URL, urllib.urlencode(params))
print repr(CUSTOM_VARS_URL)
username = raw_input('username: ')
password = getpass.getpass('password: ')
basic_auth = base64.encodestring('{0}:{1}'.format(username, password)).strip()
request = urllib2.Request(CUSTOM_VARS_URL)
request.method = 'POST'
request.add_header('Authorization', 'Basic {0}'.format(basic_auth))
response = urllib2.urlopen(request)
json_custom_vars = json.loads(response.read())
get_urls = operator.itemgetter('label', 'sum_daily_nb_uniq_visitors', 'nb_visits')
infos = map(get_urls, json_custom_vars[0]['subtable'])
f = open('custom_vars_report.csv', 'wb')
wr = csv.writer(f, quoting=csv.QUOTE_ALL)
for info in infos:
wr.writerow(info)
|
|
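The script above is Python 2 (urllib2, raw_input, print statements). The same request reads more simply in Python 3 with the requests library, which handles both the query-string encoding and the Basic Auth header; the sketch below keeps the record's Piwik parameters, issues a plain GET, and is only an illustration rather than the original script.
import csv
import datetime
import getpass
import requests
BASE_URL = 'https://webstats.easter-eggs.com/index.php'
params = {
    'module': 'API',
    'method': 'CustomVariables.getCustomVariables',
    'format': 'JSON',
    'idSite': '20',
    'period': 'month',
    'date': datetime.date.today().isoformat(),
    'expanded': '1',
    'filter_limit': '100',
}
username = input('username: ')
password = getpass.getpass('password: ')
# requests adds the Authorization header and encodes the query string.
response = requests.get(BASE_URL, params=params, auth=(username, password))
response.raise_for_status()
rows = response.json()[0]['subtable']
with open('custom_vars_report.csv', 'w', newline='') as outfile:
    writer = csv.writer(outfile, quoting=csv.QUOTE_ALL)
    for row in rows:
        writer.writerow([row['label'], row['sum_daily_nb_uniq_visitors'], row['nb_visits']])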
2cb03e58454083293a97f9f5f95285cead046c05
|
pyopenapi/scanner/v2_0/upgrade/parameter_context.py
|
pyopenapi/scanner/v2_0/upgrade/parameter_context.py
|
from __future__ import absolute_import
class ParameterContext(object):
""" A parameter object in swagger 2.0 might be converted
to 'part of' a requestBody of a single parameter object
in Open API 3.0. It's relatively complex when doing this.
Need a context object to pass information from top converter
to lower converter
"""
def __init__(self, name, is_body=False, is_file=False):
self.__is_body = is_body
self.__is_file = is_file
self.__name = name
@property
def is_body(self):
return self.__is_body
@property
def is_file(self):
return self.__is_file
@property
def name(self):
return self.__name
|
Add ParameterContext object to pass information in a top-down favor
|
Add ParameterContext object to pass information in a top-down favor
|
Python
|
mit
|
mission-liao/pyopenapi
|
Add ParameterContext object to pass information in a top-down favor
|
from __future__ import absolute_import
class ParameterContext(object):
""" A parameter object in swagger 2.0 might be converted
to 'part of' a requestBody of a single parameter object
in Open API 3.0. It's relatively complex when doing this.
Need a context object to pass information from top converter
to lower converter
"""
def __init__(self, name, is_body=False, is_file=False):
self.__is_body = is_body
self.__is_file = is_file
self.__name = name
@property
def is_body(self):
return self.__is_body
@property
def is_file(self):
return self.__is_file
@property
def name(self):
return self.__name
|
<commit_before><commit_msg>Add ParameterContext object to pass information in a top-down favor<commit_after>
|
from __future__ import absolute_import
class ParameterContext(object):
""" A parameter object in swagger 2.0 might be converted
to 'part of' a requestBody of a single parameter object
in Open API 3.0. It's relatively complex when doing this.
Need a context object to pass information from top converter
to lower converter
"""
def __init__(self, name, is_body=False, is_file=False):
self.__is_body = is_body
self.__is_file = is_file
self.__name = name
@property
def is_body(self):
return self.__is_body
@property
def is_file(self):
return self.__is_file
@property
def name(self):
return self.__name
|
Add ParameterContext object to pass information in a top-down favorfrom __future__ import absolute_import
class ParameterContext(object):
""" A parameter object in swagger 2.0 might be converted
to 'part of' a requestBody of a single parameter object
in Open API 3.0. It's relatively complex when doing this.
Need a context object to pass information from top converter
to lower converter
"""
def __init__(self, name, is_body=False, is_file=False):
self.__is_body = is_body
self.__is_file = is_file
self.__name = name
@property
def is_body(self):
return self.__is_body
@property
def is_file(self):
return self.__is_file
@property
def name(self):
return self.__name
|
<commit_before><commit_msg>Add ParameterContext object to pass information in a top-down favor<commit_after>from __future__ import absolute_import
class ParameterContext(object):
""" A parameter object in swagger 2.0 might be converted
to 'part of' a requestBody of a single parameter object
in Open API 3.0. It's relatively complex when doing this.
Need a context object to pass information from top converter
to lower converter
"""
def __init__(self, name, is_body=False, is_file=False):
self.__is_body = is_body
self.__is_file = is_file
self.__name = name
@property
def is_body(self):
return self.__is_body
@property
def is_file(self):
return self.__is_file
@property
def name(self):
return self.__name
|
|
9fb5e95e54f126a967a17a5f27cbf3539e3dc970
|
driver27/migrations/0007_auto_20170529_2211.py
|
driver27/migrations/0007_auto_20170529_2211.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-29 22:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver27', '0006_auto_20170529_2209'),
]
operations = [
migrations.AddField(
model_name='seat',
name='driver',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Driver', verbose_name='driver'),
),
migrations.AlterField(
model_name='seat',
name='contender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Contender', verbose_name='contender'),
),
]
|
Add "driver" field in Seat and then delete "contender". (migration)
|
Add "driver" field in Seat and then delete "contender". (migration)
|
Python
|
mit
|
SRJ9/django-driver27,SRJ9/django-driver27,SRJ9/django-driver27
|
Add "driver" field in Seat and then delete "contender". (migration)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-29 22:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver27', '0006_auto_20170529_2209'),
]
operations = [
migrations.AddField(
model_name='seat',
name='driver',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Driver', verbose_name='driver'),
),
migrations.AlterField(
model_name='seat',
name='contender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Contender', verbose_name='contender'),
),
]
|
<commit_before><commit_msg>Add "driver" field in Seat and then delete "contender". (migration)<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-29 22:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver27', '0006_auto_20170529_2209'),
]
operations = [
migrations.AddField(
model_name='seat',
name='driver',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Driver', verbose_name='driver'),
),
migrations.AlterField(
model_name='seat',
name='contender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Contender', verbose_name='contender'),
),
]
|
Add "driver" field in Seat and then delete "contender". (migration)# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-29 22:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver27', '0006_auto_20170529_2209'),
]
operations = [
migrations.AddField(
model_name='seat',
name='driver',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Driver', verbose_name='driver'),
),
migrations.AlterField(
model_name='seat',
name='contender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Contender', verbose_name='contender'),
),
]
|
<commit_before><commit_msg>Add "driver" field in Seat and then delete "contender". (migration)<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2017-05-29 22:11
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('driver27', '0006_auto_20170529_2209'),
]
operations = [
migrations.AddField(
model_name='seat',
name='driver',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Driver', verbose_name='driver'),
),
migrations.AlterField(
model_name='seat',
name='contender',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='seats', to='driver27.Contender', verbose_name='contender'),
),
]
|
|
7252c754f20cbf825c23476778637b5d6d81f8be
|
examples/python/dynamicsComputationTutorial.py
|
examples/python/dynamicsComputationTutorial.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:35:46 2015
@author: adelpret
"""
import iDynTree
from iDynTree import DynamicsComputations
URDF_FILE = '/home/username/path/robot.urdf';
dynComp = DynamicsComputations();
dynComp.loadRobotModelFromFile(URDF_FILE);
print "The loaded model has", dynComp.getNrOfDegreesOfFreedom(), \
"internal degrees of freedom and",dynComp.getNrOfLinks(),"links."
dofs = dynComp.getNrOfDegreesOfFreedom();
q = iDynTree.VectorDynSize(dofs);
dq = iDynTree.VectorDynSize(dofs);
ddq = iDynTree.VectorDynSize(dofs);
for dof in range(dofs):
# For the sake of the example, we fill the joints vector with gibberish data (remember in any case
# that all quantities are expressed in radians-based units
q.setVal(dof, 1.0);
dq.setVal(dof, 0.4);
ddq.setVal(dof, 0.3);
# The spatial acceleration is a 6d acceleration vector.
# For all 6d quantities, we use the linear-angular serialization
# (the first three values are for the linear quantity, and
#  the last three values are for the angular quantity)
gravity = iDynTree.SpatialAcc();
gravity.setVal(2, -9.81);
dynComp.setRobotState(q,dq,ddq,gravity);
jac = iDynTree.MatrixDynSize(6,6+dofs);
ok = dynComp.getFrameJacobian("lf_foot", jac);
if( not ok ):
print "Error in computing jacobian of frame " + "lf_foot";
else:
print "Jacobian of lf_foot is\n" + jac.toString();
links = dynComp.getNrOfLinks();
regr = iDynTree.MatrixDynSize(6+dofs,6+10*links);
ok = dynComp.getDynamicsRegressor(regr);
if( not ok ):
print "Error in computing the dynamics regressor";
else :
print "The dynamics regressor is\n" + regr.toString();
|
Add python example (same code of c++ DynamicsComputation tutorial)
|
Add python example (same code of c++ DynamicsComputation tutorial)
|
Python
|
lgpl-2.1
|
robotology/idyntree,robotology/idyntree,robotology/idyntree,robotology/idyntree,robotology/idyntree
|
Add python example (same code of c++ DynamicsComputation tutorial)
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:35:46 2015
@author: adelpret
"""
import iDynTree
from iDynTree import DynamicsComputations
URDF_FILE = '/home/username/path/robot.urdf';
dynComp = DynamicsComputations();
dynComp.loadRobotModelFromFile(URDF_FILE);
print "The loaded model has", dynComp.getNrOfDegreesOfFreedom(), \
"internal degrees of freedom and",dynComp.getNrOfLinks(),"links."
dofs = dynComp.getNrOfDegreesOfFreedom();
q = iDynTree.VectorDynSize(dofs);
dq = iDynTree.VectorDynSize(dofs);
ddq = iDynTree.VectorDynSize(dofs);
for dof in range(dofs):
# For the sake of the example, we fill the joints vector with gibberish data (remember in any case
# that all quantities are expressed in radians-based units
q.setVal(dof, 1.0);
dq.setVal(dof, 0.4);
ddq.setVal(dof, 0.3);
# The spatial acceleration is a 6d acceleration vector.
# For all 6d quantities, we use the linear-angular serialization
# (the first three values are for the linear quantity, and
#  the last three values are for the angular quantity)
gravity = iDynTree.SpatialAcc();
gravity.setVal(2, -9.81);
dynComp.setRobotState(q,dq,ddq,gravity);
jac = iDynTree.MatrixDynSize(6,6+dofs);
ok = dynComp.getFrameJacobian("lf_foot", jac);
if( not ok ):
print "Error in computing jacobian of frame " + "lf_foot";
else:
print "Jacobian of lf_foot is\n" + jac.toString();
links = dynComp.getNrOfLinks();
regr = iDynTree.MatrixDynSize(6+dofs,6+10*links);
ok = dynComp.getDynamicsRegressor(regr);
if( not ok ):
print "Error in computing the dynamics regressor";
else :
print "The dynamics regressor is\n" + regr.toString();
|
<commit_before><commit_msg>Add python example (same code of c++ DynamicsComputation tutorial)<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:35:46 2015
@author: adelpret
"""
import iDynTree
from iDynTree import DynamicsComputations
URDF_FILE = '/home/username/path/robot.urdf';
dynComp = DynamicsComputations();
dynComp.loadRobotModelFromFile(URDF_FILE);
print "The loaded model has", dynComp.getNrOfDegreesOfFreedom(), \
"internal degrees of freedom and",dynComp.getNrOfLinks(),"links."
dofs = dynComp.getNrOfDegreesOfFreedom();
q = iDynTree.VectorDynSize(dofs);
dq = iDynTree.VectorDynSize(dofs);
ddq = iDynTree.VectorDynSize(dofs);
for dof in range(dofs):
# For the sake of the example, we fill the joints vector with gibberish data (remember in any case
# that all quantities are expressed in radians-based units
q.setVal(dof, 1.0);
dq.setVal(dof, 0.4);
ddq.setVal(dof, 0.3);
# The spatial acceleration is a 6d acceleration vector.
# For all 6d quantities, we use the linear-angular serialization
# (the first three values are for the linear quantity, and
#  the last three values are for the angular quantity)
gravity = iDynTree.SpatialAcc();
gravity.setVal(2, -9.81);
dynComp.setRobotState(q,dq,ddq,gravity);
jac = iDynTree.MatrixDynSize(6,6+dofs);
ok = dynComp.getFrameJacobian("lf_foot", jac);
if( not ok ):
print "Error in computing jacobian of frame " + "lf_foot";
else:
print "Jacobian of lf_foot is\n" + jac.toString();
links = dynComp.getNrOfLinks();
regr = iDynTree.MatrixDynSize(6+dofs,6+10*links);
ok = dynComp.getDynamicsRegressor(regr);
if( not ok ):
print "Error in computing the dynamics regressor";
else :
print "The dynamics regressor is\n" + regr.toString();
|
Add python example (same code of c++ DynamicsComputation tutorial)# -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:35:46 2015
@author: adelpret
"""
import iDynTree
from iDynTree import DynamicsComputations
URDF_FILE = '/home/username/path/robot.urdf';
dynComp = DynamicsComputations();
dynComp.loadRobotModelFromFile(URDF_FILE);
print "The loaded model has", dynComp.getNrOfDegreesOfFreedom(), \
"internal degrees of freedom and",dynComp.getNrOfLinks(),"links."
dofs = dynComp.getNrOfDegreesOfFreedom();
q = iDynTree.VectorDynSize(dofs);
dq = iDynTree.VectorDynSize(dofs);
ddq = iDynTree.VectorDynSize(dofs);
for dof in range(dofs):
# For the sake of the example, we fill the joints vector with gibberish data (remember in any case
# that all quantities are expressed in radians-based units
q.setVal(dof, 1.0);
dq.setVal(dof, 0.4);
ddq.setVal(dof, 0.3);
# The spatial acceleration is a 6d acceleration vector.
# For all 6d quantities, we use the linear-angular serialization
# (the first three values are for the linear quantity, and
#  the last three values are for the angular quantity)
gravity = iDynTree.SpatialAcc();
gravity.setVal(2, -9.81);
dynComp.setRobotState(q,dq,ddq,gravity);
jac = iDynTree.MatrixDynSize(6,6+dofs);
ok = dynComp.getFrameJacobian("lf_foot", jac);
if( not ok ):
print "Error in computing jacobian of frame " + "lf_foot";
else:
print "Jacobian of lf_foot is\n" + jac.toString();
links = dynComp.getNrOfLinks();
regr = iDynTree.MatrixDynSize(6+dofs,6+10*links);
ok = dynComp.getDynamicsRegressor(regr);
if( not ok ):
print "Error in computing the dynamics regressor";
else :
print "The dynamics regressor is\n" + regr.toString();
|
<commit_before><commit_msg>Add python example (same code of c++ DynamicsComputation tutorial)<commit_after># -*- coding: utf-8 -*-
"""
Created on Tue Jun 23 11:35:46 2015
@author: adelpret
"""
import iDynTree
from iDynTree import DynamicsComputations
URDF_FILE = '/home/username/path/robot.urdf';
dynComp = DynamicsComputations();
dynComp.loadRobotModelFromFile(URDF_FILE);
print "The loaded model has", dynComp.getNrOfDegreesOfFreedom(), \
"internal degrees of freedom and",dynComp.getNrOfLinks(),"links."
dofs = dynComp.getNrOfDegreesOfFreedom();
q = iDynTree.VectorDynSize(dofs);
dq = iDynTree.VectorDynSize(dofs);
ddq = iDynTree.VectorDynSize(dofs);
for dof in range(dofs):
# For the sake of the example, we fill the joints vector with gibberish data (remember in any case
# that all quantities are expressed in radians-based units
q.setVal(dof, 1.0);
dq.setVal(dof, 0.4);
ddq.setVal(dof, 0.3);
# The spatial acceleration is a 6d acceleration vector.
# For all 6d quantities, we use the linear-angular serialization
# (the first three values are for the linear quantity, and
#  the last three values are for the angular quantity)
gravity = iDynTree.SpatialAcc();
gravity.setVal(2, -9.81);
dynComp.setRobotState(q,dq,ddq,gravity);
jac = iDynTree.MatrixDynSize(6,6+dofs);
ok = dynComp.getFrameJacobian("lf_foot", jac);
if( not ok ):
print "Error in computing jacobian of frame " + "lf_foot";
else:
print "Jacobian of lf_foot is\n" + jac.toString();
links = dynComp.getNrOfLinks();
regr = iDynTree.MatrixDynSize(6+dofs,6+10*links);
ok = dynComp.getDynamicsRegressor(regr);
if( not ok ):
print "Error in computing the dynamics regressor";
else :
print "The dynamics regressor is\n" + regr.toString();
|
|
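The comments in the record above rely on the linear-angular ordering of every 6d quantity: indices 0-2 hold the linear part and indices 3-5 the angular part, which is why gravity is written at index 2. A plain-Python illustration of that convention, independent of the iDynTree bindings:
def split_spatial(vec6):
    """Split a 6d spatial vector into (linear, angular) parts."""
    assert len(vec6) == 6
    return list(vec6[:3]), list(vec6[3:])
gravity = [0.0, 0.0, -9.81, 0.0, 0.0, 0.0]
linear, angular = split_spatial(gravity)
print(linear)   # [0.0, 0.0, -9.81]
print(angular)  # [0.0, 0.0, 0.0]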
27e6a713ff082139785c214d7f9a4cc86fcc823a
|
tools/MergeGeocodes.py
|
tools/MergeGeocodes.py
|
#!/usr/bin/env python3
############################################################################
#
# File: MergeGeocodes.py
# Last Edit: 2015-02-26
# Author: Alexander Grüneberg <alexander.grueneberg@googlemail.com>
# Purpose: Merge geocoded locations with original CSV input file.
#
############################################################################
import csv
import json
CSVFILE = 'Bham_Traffic_Accidents_2014.csv'
GEOFILE = 'Bham_Geocodings_2014.json'
OUTFILE = 'Bham_Traffic_Accidents_2014_merged.csv'
def main():
# load geocoded data
geocodes = {}
print('>> Reading geocoded data from ' + GEOFILE)
with open(GEOFILE) as f:
geocodes = json.load(f)
# load csv file
print('>> Reading CSV data from ' + CSVFILE)
with open(OUTFILE, 'w') as outfile:
writer = csv.writer(outfile)
with open(CSVFILE) as csvfile:
c = csv.reader(csvfile)
for i, record in enumerate(c):
if i == 0:
headers = record
headers.append('Latitude')
headers.append('Longitude')
writer.writerow(headers)
else:
location = record[headers.index('Location')]
if location in geocodes:
coordinates = geocodes[location].strip('()').split(', ')
else:
coordinates = ['', '']
line = record
line.append(coordinates[0])
line.append(coordinates[1])
writer.writerow(line)
print('>> Complete, see ' + OUTFILE)
if __name__=='__main__':
main()
|
Add script to merge geocodes with accident data.
|
Add script to merge geocodes with accident data.
|
Python
|
unlicense
|
CodeforBirmingham/traffic-accident-reports,CodeforBirmingham/traffic-accident-reports
|
Add script to merge geocodes with accident data.
|
#!/usr/bin/env python3
############################################################################
#
# File: MergeGeocodes.py
# Last Edit: 2015-02-26
# Author: Alexander Grüneberg <alexander.grueneberg@googlemail.com>
# Purpose: Merge geocoded locations with original CSV input file.
#
############################################################################
import csv
import json
CSVFILE = 'Bham_Traffic_Accidents_2014.csv'
GEOFILE = 'Bham_Geocodings_2014.json'
OUTFILE = 'Bham_Traffic_Accidents_2014_merged.csv'
def main():
# load geocoded data
geocodes = {}
print('>> Reading geocoded data from ' + GEOFILE)
with open(GEOFILE) as f:
geocodes = json.load(f)
# load csv file
print('>> Reading CSV data from ' + CSVFILE)
with open(OUTFILE, 'w') as outfile:
writer = csv.writer(outfile)
with open(CSVFILE) as csvfile:
c = csv.reader(csvfile)
for i, record in enumerate(c):
if i == 0:
headers = record
headers.append('Latitude')
headers.append('Longitude')
writer.writerow(headers)
else:
location = record[headers.index('Location')]
if location in geocodes:
coordinates = geocodes[location].strip('()').split(', ')
else:
coordinates = ['', '']
line = record
line.append(coordinates[0])
line.append(coordinates[1])
writer.writerow(line)
print('>> Complete, see ' + OUTFILE)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Add script to merge geocodes with accident data.<commit_after>
|
#!/usr/bin/env python3
############################################################################
#
# File: MergeGeocodes.py
# Last Edit: 2015-02-26
# Author: Alexander Grüneberg <alexander.grueneberg@googlemail.com>
# Purpose: Merge geocoded locations with original CSV input file.
#
############################################################################
import csv
import json
CSVFILE = 'Bham_Traffic_Accidents_2014.csv'
GEOFILE = 'Bham_Geocodings_2014.json'
OUTFILE = 'Bham_Traffic_Accidents_2014_merged.csv'
def main():
# load geocoded data
geocodes = {}
print('>> Reading geocoded data from ' + GEOFILE)
with open(GEOFILE) as f:
geocodes = json.load(f)
# load csv file
print('>> Reading CSV data from ' + CSVFILE)
with open(OUTFILE, 'w') as outfile:
writer = csv.writer(outfile)
with open(CSVFILE) as csvfile:
c = csv.reader(csvfile)
for i, record in enumerate(c):
if i == 0:
headers = record
headers.append('Latitude')
headers.append('Longitude')
writer.writerow(headers)
else:
location = record[headers.index('Location')]
if location in geocodes:
coordinates = geocodes[location].strip('()').split(', ')
else:
coordinates = ['', '']
line = record
line.append(coordinates[0])
line.append(coordinates[1])
writer.writerow(line)
print('>> Complete, see ' + OUTFILE)
if __name__=='__main__':
main()
|
Add script to merge geocodes with accident data.#!/usr/bin/env python3
############################################################################
#
# File: MergeGeocodes.py
# Last Edit: 2015-02-26
# Author: Alexander Grüneberg <alexander.grueneberg@googlemail.com>
# Purpose: Merge geocoded locations with original CSV input file.
#
############################################################################
import csv
import json
CSVFILE = 'Bham_Traffic_Accidents_2014.csv'
GEOFILE = 'Bham_Geocodings_2014.json'
OUTFILE = 'Bham_Traffic_Accidents_2014_merged.csv'
def main():
# load geocoded data
geocodes = {}
print('>> Reading geocoded data from ' + GEOFILE)
with open(GEOFILE) as f:
geocodes = json.load(f)
# load csv file
print('>> Reading CSV data from ' + CSVFILE)
with open(OUTFILE, 'w') as outfile:
writer = csv.writer(outfile)
with open(CSVFILE) as csvfile:
c = csv.reader(csvfile)
for i, record in enumerate(c):
if i == 0:
headers = record
headers.append('Latitude')
headers.append('Longitude')
writer.writerow(headers)
else:
location = record[headers.index('Location')]
if location in geocodes:
coordinates = geocodes[location].strip('()').split(', ')
else:
coordinates = ['', '']
line = record
line.append(coordinates[0])
line.append(coordinates[1])
writer.writerow(line)
print('>> Complete, see ' + OUTFILE)
if __name__=='__main__':
main()
|
<commit_before><commit_msg>Add script to merge geocodes with accident data.<commit_after>#!/usr/bin/env python3
############################################################################
#
# File: MergeGeocodes.py
# Last Edit: 2015-02-26
# Author: Alexander Grüneberg <alexander.grueneberg@googlemail.com>
# Purpose: Merge geocoded locations with original CSV input file.
#
############################################################################
import csv
import json
CSVFILE = 'Bham_Traffic_Accidents_2014.csv'
GEOFILE = 'Bham_Geocodings_2014.json'
OUTFILE = 'Bham_Traffic_Accidents_2014_merged.csv'
def main():
# load geocoded data
geocodes = {}
print('>> Reading geocoded data from ' + GEOFILE)
with open(GEOFILE) as f:
geocodes = json.load(f)
# load csv file
print('>> Reading CSV data from ' + CSVFILE)
with open(OUTFILE, 'w') as outfile:
writer = csv.writer(outfile)
with open(CSVFILE) as csvfile:
c = csv.reader(csvfile)
for i, record in enumerate(c):
if i == 0:
headers = record
headers.append('Latitude')
headers.append('Longitude')
writer.writerow(headers)
else:
location = record[headers.index('Location')]
if location in geocodes:
coordinates = geocodes[location].strip('()').split(', ')
else:
coordinates = ['', '']
line = record
line.append(coordinates[0])
line.append(coordinates[1])
writer.writerow(line)
print('>> Complete, see ' + OUTFILE)
if __name__=='__main__':
main()
|
|
ab82983418c4c104e741c70b797057b9c424c647
|
Testing/test_PiecewiseDynamics.py
|
Testing/test_PiecewiseDynamics.py
|
import unittest
from SloppyCell.ReactionNetworks import *
import TestNetwork
class test_PiecewiseDynamics(unittest.TestCase):
#XXX: Assignment rules currently not supported. To do so, add a vector
# version of piecewise to Trajectory_mod.py
#def test_assignment(self):
# net = TestNetwork.net.copy('piecewise')
# net.disable_deriv_funcs()
# net.compile(disable_c=True)
# net.addSpecies('C', 'basic')
# net.add_assignment_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
# traj = Dynamics.integrate(net, [0, 0.5, 1.5])
def test_raterule(self):
net = TestNetwork.net.copy('piecewise')
net.addSpecies('C', 'basic', 0)
net.add_rate_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
net.disable_deriv_funcs()
net.disable_c = True
traj = Dynamics.integrate(net, [0, 0.5, 1.5])
self.assertAlmostEqual(traj.get_var_val('C', 1.5), 2.5, 3)
suite = unittest.makeSuite(test_PiecewiseDynamics)
if __name__ == '__main__':
unittest.main()
|
Add basic test for piecewise in dynamical equations
|
Add basic test for piecewise in dynamical equations
|
Python
|
bsd-3-clause
|
GutenkunstLab/SloppyCell,GutenkunstLab/SloppyCell
|
Add basic test for piecewise in dynamical equations
|
import unittest
from SloppyCell.ReactionNetworks import *
import TestNetwork
class test_PiecewiseDynamics(unittest.TestCase):
#XXX: Assignment rules currently not supported. To do so, add a vector
# version of piecewise to Trajectory_mod.py
#def test_assignment(self):
# net = TestNetwork.net.copy('piecewise')
# net.disable_deriv_funcs()
# net.compile(disable_c=True)
# net.addSpecies('C', 'basic')
# net.add_assignment_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
# traj = Dynamics.integrate(net, [0, 0.5, 1.5])
def test_raterule(self):
net = TestNetwork.net.copy('piecewise')
net.addSpecies('C', 'basic', 0)
net.add_rate_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
net.disable_deriv_funcs()
net.disable_c = True
traj = Dynamics.integrate(net, [0, 0.5, 1.5])
self.assertAlmostEqual(traj.get_var_val('C', 1.5), 2.5, 3)
suite = unittest.makeSuite(test_PiecewiseDynamics)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic test for piecewise in dynamical equations<commit_after>
|
import unittest
from SloppyCell.ReactionNetworks import *
import TestNetwork
class test_PiecewiseDynamics(unittest.TestCase):
#XXX: Assignment rules currently not supported. To do so, add a vector
# version of piecewise to Trajectory_mod.py
#def test_assignment(self):
# net = TestNetwork.net.copy('piecewise')
# net.disable_deriv_funcs()
# net.compile(disable_c=True)
# net.addSpecies('C', 'basic')
# net.add_assignment_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
# traj = Dynamics.integrate(net, [0, 0.5, 1.5])
def test_raterule(self):
net = TestNetwork.net.copy('piecewise')
net.addSpecies('C', 'basic', 0)
net.add_rate_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
net.disable_deriv_funcs()
net.disable_c = True
traj = Dynamics.integrate(net, [0, 0.5, 1.5])
self.assertAlmostEqual(traj.get_var_val('C', 1.5), 2.5, 3)
suite = unittest.makeSuite(test_PiecewiseDynamics)
if __name__ == '__main__':
unittest.main()
|
Add basic test for piecewise in dynamical equationsimport unittest
from SloppyCell.ReactionNetworks import *
import TestNetwork
class test_PiecewiseDynamics(unittest.TestCase):
#XXX: Assignment rules currently not supported. To do so, add a vector
# version of piecewise to Trajectory_mod.py
#def test_assignment(self):
# net = TestNetwork.net.copy('piecewise')
# net.disable_deriv_funcs()
# net.compile(disable_c=True)
# net.addSpecies('C', 'basic')
# net.add_assignment_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
# traj = Dynamics.integrate(net, [0, 0.5, 1.5])
def test_raterule(self):
net = TestNetwork.net.copy('piecewise')
net.addSpecies('C', 'basic', 0)
net.add_rate_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
net.disable_deriv_funcs()
net.disable_c = True
traj = Dynamics.integrate(net, [0, 0.5, 1.5])
self.assertAlmostEqual(traj.get_var_val('C', 1.5), 2.5, 3)
suite = unittest.makeSuite(test_PiecewiseDynamics)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add basic test for piecewise in dynamical equations<commit_after>import unittest
from SloppyCell.ReactionNetworks import *
import TestNetwork
class test_PiecewiseDynamics(unittest.TestCase):
#XXX: Assignment rules currently not supported. To do so, add a vector
# version of piecewise to Trajectory_mod.py
#def test_assignment(self):
# net = TestNetwork.net.copy('piecewise')
# net.disable_deriv_funcs()
# net.compile(disable_c=True)
# net.addSpecies('C', 'basic')
# net.add_assignment_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
# traj = Dynamics.integrate(net, [0, 0.5, 1.5])
def test_raterule(self):
net = TestNetwork.net.copy('piecewise')
net.addSpecies('C', 'basic', 0)
net.add_rate_rule('C', 'piecewise(2.0, time < 1.0, 1.0)')
net.disable_deriv_funcs()
net.disable_c = True
traj = Dynamics.integrate(net, [0, 0.5, 1.5])
self.assertAlmostEqual(traj.get_var_val('C', 1.5), 2.5, 3)
suite = unittest.makeSuite(test_PiecewiseDynamics)
if __name__ == '__main__':
unittest.main()
|
|
183c02d56848035e4ec162776317df82d5b43d4d
|
test_merge_sort.py
|
test_merge_sort.py
|
# -*- coding: utf-8 -*-
from merge_sort import merge_sort
def test_sorted():
my_list = list(range(100))
merge_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
merge_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
merge_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
merge_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
merge_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
|
Add tests for merge sort
|
Add tests for merge sort
|
Python
|
mit
|
nbeck90/data_structures_2
|
Add tests for merge sort
|
# -*- coding: utf-8 -*-
from merge_sort import merge_sort
def test_sorted():
my_list = list(range(100))
merge_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
merge_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
merge_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
merge_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
merge_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
|
<commit_before><commit_msg>Add tests for merge sort<commit_after>
|
# -*- coding: utf-8 -*-
from merge_sort import merge_sort
def test_sorted():
my_list = list(range(100))
merge_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
merge_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
merge_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
merge_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
merge_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
|
Add tests for merge sort# -*- coding: utf-8 -*-
from merge_sort import merge_sort
def test_sorted():
my_list = list(range(100))
merge_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
merge_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
merge_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
merge_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
merge_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
|
<commit_before><commit_msg>Add tests for merge sort<commit_after># -*- coding: utf-8 -*-
from merge_sort import merge_sort
def test_sorted():
my_list = list(range(100))
merge_sort(my_list)
assert my_list == list(range(100))
def test_reverse():
my_list = list(range(100))[::-1]
merge_sort(my_list)
assert my_list == list(range(100))
def test_empty():
my_list = []
merge_sort(my_list)
assert my_list == []
def test_abc():
my_list = ['a', 'b', 'c', 'd', 'e']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
my_list = ['e', 'd', 'c', 'b', 'a']
merge_sort(my_list)
assert my_list == ['a', 'b', 'c', 'd', 'e']
def test_unicode():
my_list = ['π']
merge_sort(my_list)
assert my_list == ['\xcf\x80']
def test_duplicate():
my_list = [1, 2, 2, 5, 3]
merge_sort(my_list)
assert my_list == [1, 2, 2, 3, 5]
|
|
5cc3ae018c09a3d642fd83c890ef137681e07bdc
|
tests/test_lspr.py
|
tests/test_lspr.py
|
import os
import pickle
import pytest
import functools
import sys
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key_int', ['total_elements',
'iterations'])
def test_lspr_elements_iterations(key_int):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results[key_int] == results[key_int]
@pytest.mark.parametrize('key', ['Cext',
'surf_Cext'])
def test_lspr(key):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
#Cext and surf_Cext are lists, for the example are one element lists, so
#to check the assertion we access that element. i.e [0]
assert abs(base_results[key][0] - results[key][0]) / abs(base_results[key][0] + 1e-16) < 1e-12
@functools.lru_cache(4)
def get_results():
print('Generating results for lspr example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lspr'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lspr'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
Add lspr example regression test
|
Add lspr example regression test
|
Python
|
bsd-3-clause
|
barbagroup/pygbe,barbagroup/pygbe,barbagroup/pygbe
|
Add lspr example regression test
|
import os
import pickle
import pytest
import functools
import sys
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key_int', ['total_elements',
'iterations'])
def test_lspr_elements_iterations(key_int):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results[key_int] == results[key_int]
@pytest.mark.parametrize('key', ['Cext',
'surf_Cext'])
def test_lspr(key):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
#Cext and surf_Cext are lists, for the example are one element lists, so
#to check the assertion we access that element. i.e [0]
assert abs(base_results[key][0] - results[key][0]) / abs(base_results[key][0] + 1e-16) < 1e-12
@functools.lru_cache(4)
def get_results():
print('Generating results for lspr example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lspr'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lspr'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
<commit_before><commit_msg>Add lspr example regression test<commit_after>
|
import os
import pickle
import pytest
import functools
import sys
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key_int', ['total_elements',
'iterations'])
def test_lspr_elements_iterations(key_int):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results[key_int] == results[key_int]
@pytest.mark.parametrize('key', ['Cext',
'surf_Cext'])
def test_lspr(key):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
#Cext and surf_Cext are lists, for the example are one element lists, so
#to check the assertion we access that element. i.e [0]
assert abs(base_results[key][0] - results[key][0]) / abs(base_results[key][0] + 1e-16) < 1e-12
@functools.lru_cache(4)
def get_results():
print('Generating results for lspr example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lspr'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lspr'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
Add lspr example regression testimport os
import pickle
import pytest
import functools
import sys
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key_int', ['total_elements',
'iterations'])
def test_lspr_elements_iterations(key_int):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results[key_int] == results[key_int]
@pytest.mark.parametrize('key', ['Cext',
'surf_Cext'])
def test_lspr(key):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
#Cext and surf_Cext are lists, for the example are one element lists, so
#to check the assertion we access that element. i.e [0]
assert abs(base_results[key][0] - results[key][0]) / abs(base_results[key][0] + 1e-16) < 1e-12
@functools.lru_cache(4)
def get_results():
print('Generating results for lspr example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lspr'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lspr'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
<commit_before><commit_msg>Add lspr example regression test<commit_after>import os
import pickle
import pytest
import functools
import sys
try:
import pycuda
except ImportError:
ans = input('PyCUDA not found. Regression tests will take forever. Do you want to continue? [y/n] ')
if ans in ['Y', 'y']:
pass
else:
sys.exit()
from pygbe.main import main
@pytest.mark.parametrize('key_int', ['total_elements',
'iterations'])
def test_lspr_elements_iterations(key_int):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
assert base_results[key_int] == results[key_int]
@pytest.mark.parametrize('key', ['Cext',
'surf_Cext'])
def test_lspr(key):
results = get_results()
with open('lspr.pickle', 'rb') as f:
base_results = pickle.load(f)
#Cext and surf_Cext are lists, for the example are one element lists, so
#to check the assertion we access that element. i.e [0]
assert abs(base_results[key][0] - results[key][0]) / abs(base_results[key][0] + 1e-16) < 1e-12
@functools.lru_cache(4)
def get_results():
print('Generating results for lspr example...')
if os.getcwd().rsplit('/', 1)[1] == 'tests':
results = main(['','../examples/lspr'],
log_output=False,
return_results_dict=True)
elif os.getcwd().rsplit('/', 1)[1] == 'pygbe':
results = main(['','./examples/lspr'],
log_output=False,
return_results_dict=True)
else:
print("Run tests from either the main repo directory or the tests directory")
return results
|
|
e928094c43c076c72841efb1cc477f92d6a3281f
|
set1/stringlib.py
|
set1/stringlib.py
|
import base64
import unittest
def decode_hex(string):
return base64.b16decode(string, casefold=True)
def encode_hex(string):
return base64.b16encode(string)
def decode_base64(string):
return base64.standard_b64decode(string, validate=True)
def encode_base64(string):
return base64.standard_b64encode(string)
def xor_strings(string1, string2):
x = ''.join(str(int(a)^int(b)) for a, b in zip(string1, string2))
return encode_hex(x)
|
Refactor encoding in string library.
|
Refactor encoding in string library.
|
Python
|
mit
|
Renelvon/matasano
|
Refactor encoding in string library.
|
import base64
import unittest
def decode_hex(string):
return base64.b16decode(string, casefold=True)
def encode_hex(string):
return base64.b16encode(string)
def decode_base64(string):
return base64.standard_b64decode(string, validate=True)
def encode_base64(string):
return base64.standard_b64encode(string)
def xor_strings(string1, string2):
x = ''.join(str(int(a)^int(b)) for a, b in zip(string1, string2))
return encode_hex(x)
|
<commit_before><commit_msg>Refactor encoding in string library.<commit_after>
|
import base64
import unittest
def decode_hex(string):
return base64.b16decode(string, casefold=True)
def encode_hex(string):
return base64.b16encode(string)
def decode_base64(string):
return base64.standard_b64decode(string, validate=True)
def encode_base64(string):
return base64.standard_b64encode(string)
def xor_strings(string1, string2):
x = ''.join(str(int(a)^int(b)) for a, b in zip(string1, string2))
return encode_hex(x)
|
Refactor encoding in string library.import base64
import unittest
def decode_hex(string):
return base64.b16decode(string, casefold=True)
def encode_hex(string):
return base64.b16encode(string)
def decode_base64(string):
return base64.standard_b64decode(string, validate=True)
def encode_base64(string):
return base64.standard_b64encode(string)
def xor_strings(string1, string2):
x = ''.join(str(int(a)^int(b)) for a, b in zip(string1, string2))
return encode_hex(x)
|
<commit_before><commit_msg>Refactor encoding in string library.<commit_after>import base64
import unittest
def decode_hex(string):
return base64.b16decode(string, casefold=True)
def encode_hex(string):
return base64.b16encode(string)
def decode_base64(string):
return base64.standard_b64decode(string, validate=True)
def encode_base64(string):
return base64.standard_b64encode(string)
def xor_strings(string1, string2):
x = ''.join(str(int(a)^int(b)) for a, b in zip(string1, string2))
return encode_hex(x)
|
|
d0c00a73d9dc5b4bde076fce3c06dff34c9d48f6
|
test/test_pix_to_np.py
|
test/test_pix_to_np.py
|
#!/usr/bin/env python
#
# Test program to ensure that the Pix to/from numpy conversion routines are
# actually functioning as we think they're functioning
#
import tesseract_sip as tesseract
import numpy as np
def np_from_pix(pix):
'''
Converts a leptonica Pix object into a numpy array suitable
for using with OpenCV cv2 API
'''
# buffer length in pix object is expressed in bytes, so we
# always use np.uint8 to read it
buf = np.frombuffer(pix.get_buffer(), np.uint8)
buf.shape = pix.get_buffer_shape()
return buf
def test_array(w, h, d):
# create an array
original = np.linspace(0, 255, w*h*d).astype(np.uint8)
# reshape
original.shape = (w, h, d)
# convert to pix
pix = tesseract.Pix.from_buffer(original)
# can help determine which part of the conversion is failing
#pix.write('tmp.tif')
#copy = cv2.imread('tmp.tif')
# convert back
copy = np_from_pix(pix)
# compare
if not np.all(copy == original):
print original[:, :, d-1]
print
print copy[:, :, d-1]
raise RuntimeError("Error: do not match: %s %s %s" % (w, h, d))
if __name__ == '__main__':
np.set_printoptions(formatter={'int': lambda x: '%02x' % x})
if True:
for w in xrange(1, 75):
for h in xrange(1, 75):
for d in (1, 3):
test_array(w, h, d)
else:
test_array(10, 10, 4)
print "All tests passed"
exit(0)
|
Add unit test to check pix conversion routines
|
Add unit test to check pix conversion routines
|
Python
|
apache-2.0
|
virtuald/python-tesseract-sip,cookbrite/python-tesseract-sip,cookbrite/python-tesseract-sip,virtuald/python-tesseract-sip
|
Add unit test to check pix conversion routines
|
#!/usr/bin/env python
#
# Test program to ensure that the Pix to/from numpy conversion routines are
# actually functioning as we think they're functioning
#
import tesseract_sip as tesseract
import numpy as np
def np_from_pix(pix):
'''
Converts a leptonica Pix object into a numpy array suitable
for using with OpenCV cv2 API
'''
# buffer length in pix object is expressed in bytes, so we
# always use np.uint8 to read it
buf = np.frombuffer(pix.get_buffer(), np.uint8)
buf.shape = pix.get_buffer_shape()
return buf
def test_array(w, h, d):
# create an array
original = np.linspace(0, 255, w*h*d).astype(np.uint8)
# reshape
original.shape = (w, h, d)
# convert to pix
pix = tesseract.Pix.from_buffer(original)
# can help determine which part of the conversion is failing
#pix.write('tmp.tif')
#copy = cv2.imread('tmp.tif')
# convert back
copy = np_from_pix(pix)
# compare
if not np.all(copy == original):
print original[:, :, d-1]
print
print copy[:, :, d-1]
raise RuntimeError("Error: do not match: %s %s %s" % (w, h, d))
if __name__ == '__main__':
np.set_printoptions(formatter={'int': lambda x: '%02x' % x})
if True:
for w in xrange(1, 75):
for h in xrange(1, 75):
for d in (1, 3):
test_array(w, h, d)
else:
test_array(10, 10, 4)
print "All tests passed"
exit(0)
|
<commit_before><commit_msg>Add unit test to check pix conversion routines<commit_after>
|
#!/usr/bin/env python
#
# Test program to ensure that the Pix to/from numpy conversion routines are
# actually functioning as we think they're functioning
#
import tesseract_sip as tesseract
import numpy as np
def np_from_pix(pix):
'''
Converts a leptonica Pix object into a numpy array suitable
for using with OpenCV cv2 API
'''
# buffer length in pix object is expressed in bytes, so we
# always use np.uint8 to read it
buf = np.frombuffer(pix.get_buffer(), np.uint8)
buf.shape = pix.get_buffer_shape()
return buf
def test_array(w, h, d):
# create an array
original = np.linspace(0, 255, w*h*d).astype(np.uint8)
# reshape
original.shape = (w, h, d)
# convert to pix
pix = tesseract.Pix.from_buffer(original)
# can help determine which part of the conversion is failing
#pix.write('tmp.tif')
#copy = cv2.imread('tmp.tif')
# convert back
copy = np_from_pix(pix)
# compare
if not np.all(copy == original):
print original[:, :, d-1]
print
print copy[:, :, d-1]
raise RuntimeError("Error: do not match: %s %s %s" % (w, h, d))
if __name__ == '__main__':
np.set_printoptions(formatter={'int': lambda x: '%02x' % x})
if True:
for w in xrange(1, 75):
for h in xrange(1, 75):
for d in (1, 3):
test_array(w, h, d)
else:
test_array(10, 10, 4)
print "All tests passed"
exit(0)
|
Add unit test to check pix conversion routines#!/usr/bin/env python
#
# Test program to ensure that the Pix to/from numpy conversion routines are
# actually functioning as we think they're functioning
#
import tesseract_sip as tesseract
import numpy as np
def np_from_pix(pix):
'''
Converts a leptonica Pix object into a numpy array suitable
for using with OpenCV cv2 API
'''
# buffer length in pix object is expressed in bytes, so we
# always use np.uint8 to read it
buf = np.frombuffer(pix.get_buffer(), np.uint8)
buf.shape = pix.get_buffer_shape()
return buf
def test_array(w, h, d):
# create an array
original = np.linspace(0, 255, w*h*d).astype(np.uint8)
# reshape
original.shape = (w, h, d)
# convert to pix
pix = tesseract.Pix.from_buffer(original)
# can help determine which part of the conversion is failing
#pix.write('tmp.tif')
#copy = cv2.imread('tmp.tif')
# convert back
copy = np_from_pix(pix)
# compare
if not np.all(copy == original):
print original[:, :, d-1]
print
print copy[:, :, d-1]
raise RuntimeError("Error: do not match: %s %s %s" % (w, h, d))
if __name__ == '__main__':
np.set_printoptions(formatter={'int': lambda x: '%02x' % x})
if True:
for w in xrange(1, 75):
for h in xrange(1, 75):
for d in (1, 3):
test_array(w, h, d)
else:
test_array(10, 10, 4)
print "All tests passed"
exit(0)
|
<commit_before><commit_msg>Add unit test to check pix conversion routines<commit_after>#!/usr/bin/env python
#
# Test program to ensure that the Pix to/from numpy conversion routines are
# actually functioning as we think they're functioning
#
import tesseract_sip as tesseract
import numpy as np
def np_from_pix(pix):
'''
Converts a leptonica Pix object into a numpy array suitable
for using with OpenCV cv2 API
'''
# buffer length in pix object is expressed in bytes, so we
# always use np.uint8 to read it
buf = np.frombuffer(pix.get_buffer(), np.uint8)
buf.shape = pix.get_buffer_shape()
return buf
def test_array(w, h, d):
# create an array
original = np.linspace(0, 255, w*h*d).astype(np.uint8)
# reshape
original.shape = (w, h, d)
# convert to pix
pix = tesseract.Pix.from_buffer(original)
# can help determine which part of the conversion is failing
#pix.write('tmp.tif')
#copy = cv2.imread('tmp.tif')
# convert back
copy = np_from_pix(pix)
# compare
if not np.all(copy == original):
print original[:, :, d-1]
print
print copy[:, :, d-1]
raise RuntimeError("Error: do not match: %s %s %s" % (w, h, d))
if __name__ == '__main__':
np.set_printoptions(formatter={'int': lambda x: '%02x' % x})
if True:
for w in xrange(1, 75):
for h in xrange(1, 75):
for d in (1, 3):
test_array(w, h, d)
else:
test_array(10, 10, 4)
print "All tests passed"
exit(0)
|
|
66ee31b1cc8d3921eb9c34725e91c08297f33cf0
|
tests/functional/registration/test_version.py
|
tests/functional/registration/test_version.py
|
"""
Test `version` command.
"""
import os
import re
from pkg_resources import parse_version
from textx.cli import textx
from click.testing import CliRunner
def test_version_command():
runner = CliRunner()
result = runner.invoke(textx, ['version'])
assert result.exit_code == 0
assert result.output.startswith('textX')
version_text = result.output.split()[-1]
version = parse_version(version_text)
assert version.__class__.__name__ == 'Version'
|
Add test for `version` command
|
Add test for `version` command
|
Python
|
mit
|
igordejanovic/textX,igordejanovic/textX,igordejanovic/textX
|
Add test for `version` command
|
"""
Test `version` command.
"""
import os
import re
from pkg_resources import parse_version
from textx.cli import textx
from click.testing import CliRunner
def test_version_command():
runner = CliRunner()
result = runner.invoke(textx, ['version'])
assert result.exit_code == 0
assert result.output.startswith('textX')
version_text = result.output.split()[-1]
version = parse_version(version_text)
assert version.__class__.__name__ == 'Version'
|
<commit_before><commit_msg>Add test for `version` command<commit_after>
|
"""
Test `version` command.
"""
import os
import re
from pkg_resources import parse_version
from textx.cli import textx
from click.testing import CliRunner
def test_version_command():
runner = CliRunner()
result = runner.invoke(textx, ['version'])
assert result.exit_code == 0
assert result.output.startswith('textX')
version_text = result.output.split()[-1]
version = parse_version(version_text)
assert version.__class__.__name__ == 'Version'
|
Add test for `version` command"""
Test `version` command.
"""
import os
import re
from pkg_resources import parse_version
from textx.cli import textx
from click.testing import CliRunner
def test_version_command():
runner = CliRunner()
result = runner.invoke(textx, ['version'])
assert result.exit_code == 0
assert result.output.startswith('textX')
version_text = result.output.split()[-1]
version = parse_version(version_text)
assert version.__class__.__name__ == 'Version'
|
<commit_before><commit_msg>Add test for `version` command<commit_after>"""
Test `version` command.
"""
import os
import re
from pkg_resources import parse_version
from textx.cli import textx
from click.testing import CliRunner
def test_version_command():
runner = CliRunner()
result = runner.invoke(textx, ['version'])
assert result.exit_code == 0
assert result.output.startswith('textX')
version_text = result.output.split()[-1]
version = parse_version(version_text)
assert version.__class__.__name__ == 'Version'
|
|
cd97e8d8f8578abef246f3780b4c0ec10eebc8fa
|
tests/test_WListBox.py
|
tests/test_WListBox.py
|
import unittest
from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context
class User:
def __init__(self, name, age):
self.name = name
self.age = age
class UserListBox(WListBox):
def __init__(self, width, height, items):
super().__init__(w=width, h=height, items=items)
def render_line(self, user):
return user.name
class WListBoxTest(unittest.TestCase):
def test_handle_key_with_custom_type_of_items(self):
with Context():
users = [User('admin', 30), User('root', 27)]
widget = UserListBox(width=5, height=5, items=users)
self.assertIsNone(widget.handle_key(KEY_DOWN))
|
Add test for rendering WListBox in case of non-str content.
|
tests: Add test for rendering WListBox in case of non-str content.
|
Python
|
mit
|
pfalcon/picotui
|
tests: Add test for rendering WListBox in case of non-str content.
|
import unittest
from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context
class User:
def __init__(self, name, age):
self.name = name
self.age = age
class UserListBox(WListBox):
def __init__(self, width, height, items):
super().__init__(w=width, h=height, items=items)
def render_line(self, user):
return user.name
class WListBoxTest(unittest.TestCase):
def test_handle_key_with_custom_type_of_items(self):
with Context():
users = [User('admin', 30), User('root', 27)]
widget = UserListBox(width=5, height=5, items=users)
self.assertIsNone(widget.handle_key(KEY_DOWN))
|
<commit_before><commit_msg>tests: Add test for rendering WListBox in case of non-str content.<commit_after>
|
import unittest
from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context
class User:
def __init__(self, name, age):
self.name = name
self.age = age
class UserListBox(WListBox):
def __init__(self, width, height, items):
super().__init__(w=width, h=height, items=items)
def render_line(self, user):
return user.name
class WListBoxTest(unittest.TestCase):
def test_handle_key_with_custom_type_of_items(self):
with Context():
users = [User('admin', 30), User('root', 27)]
widget = UserListBox(width=5, height=5, items=users)
self.assertIsNone(widget.handle_key(KEY_DOWN))
|
tests: Add test for rendering WListBox in case of non-str content.import unittest
from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context
class User:
def __init__(self, name, age):
self.name = name
self.age = age
class UserListBox(WListBox):
def __init__(self, width, height, items):
super().__init__(w=width, h=height, items=items)
def render_line(self, user):
return user.name
class WListBoxTest(unittest.TestCase):
def test_handle_key_with_custom_type_of_items(self):
with Context():
users = [User('admin', 30), User('root', 27)]
widget = UserListBox(width=5, height=5, items=users)
self.assertIsNone(widget.handle_key(KEY_DOWN))
|
<commit_before><commit_msg>tests: Add test for rendering WListBox in case of non-str content.<commit_after>import unittest
from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context
class User:
def __init__(self, name, age):
self.name = name
self.age = age
class UserListBox(WListBox):
def __init__(self, width, height, items):
super().__init__(w=width, h=height, items=items)
def render_line(self, user):
return user.name
class WListBoxTest(unittest.TestCase):
def test_handle_key_with_custom_type_of_items(self):
with Context():
users = [User('admin', 30), User('root', 27)]
widget = UserListBox(width=5, height=5, items=users)
self.assertIsNone(widget.handle_key(KEY_DOWN))
|
|
8c93e873a71d19f390c69c6774b92f28dc0110de
|
tests/test_defaults.py
|
tests/test_defaults.py
|
from logstapo import defaults
def test_defaults():
# in case you wonder why there's a test for this:
# changing the default config file path would break invocations
# where the config file path is not specified so it should be
# considered immutable
assert defaults.CONFIG_FILE_PATH == '/etc/logstapo.yml'
|
Cover default.py with a test
|
Cover default.py with a test
|
Python
|
mit
|
ThiefMaster/logstapo
|
Cover default.py with a test
|
from logstapo import defaults
def test_defaults():
# in case you wonder why there's a test for this:
# changing the default config file path would break invocations
# where the config file path is not specified so it should be
# considered immutable
assert defaults.CONFIG_FILE_PATH == '/etc/logstapo.yml'
|
<commit_before><commit_msg>Cover default.py with a test<commit_after>
|
from logstapo import defaults
def test_defaults():
# in case you wonder why there's a test for this:
# changing the default config file path would break invocations
# where the config file path is not specified so it should be
# considered immutable
assert defaults.CONFIG_FILE_PATH == '/etc/logstapo.yml'
|
Cover default.py with a testfrom logstapo import defaults
def test_defaults():
# in case you wonder why there's a test for this:
# changing the default config file path would break invocations
# where the config file path is not specified so it should be
# considered immutable
assert defaults.CONFIG_FILE_PATH == '/etc/logstapo.yml'
|
<commit_before><commit_msg>Cover default.py with a test<commit_after>from logstapo import defaults
def test_defaults():
# in case you wonder why there's a test for this:
# changing the default config file path would break invocations
# where the config file path is not specified so it should be
# considered immutable
assert defaults.CONFIG_FILE_PATH == '/etc/logstapo.yml'
|
|
1889e03c139e0fb66d4241aa10b29345ef3bde5b
|
python_src/SerialUDPBridge.py
|
python_src/SerialUDPBridge.py
|
import serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
data, addr = sock.recvfrom(1280) # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT))            # Listen on all adapters
SerialIOArduino = serial.Serial(port,9600) # setup the serial port and baudrate
SerialIOArduino.flushInput()                # Remove old inputs
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
inputLine = SerialIOArduino.readline().strip() # read a '\n' terminated line()
# Send the csv string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
|
Send the messages from the serial port as UDP messages to port 9050 Receives UDP messages on port 9050 and sends them over the serial line
|
Send the messages from the serial port as UDP messages to port 9050
Receives UDP messages on port 9050 and sends them over the serial line
|
Python
|
mit
|
rlangoy/socLabWeek43
|
Send the messages from the serial port as UDP messages to port 9050
Receives UDP messages on port 9050 and sends them over the serial line
|
import serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
data, addr = sock.recvfrom(1280) # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT))            # Listen on all adapters
SerialIOArduino = serial.Serial(port,9600) # setup the serial port and baudrate
SerialIOArduino.flushInput()                # Remove old inputs
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
inputLine = SerialIOArduino.readline().strip() # read a '\n' terminated line()
# Send the csv string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
|
<commit_before><commit_msg>Send the messages from the serial port as UDP messages to port 9050
Receives UDP messages on port 9050 and sends them over the serial line<commit_after>
|
import serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
data, addr = sock.recvfrom(1280) # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT))            # Listen on all adapters
SerialIOArduino = serial.Serial(port,9600) # setup the serial port and baudrate
SerialIOArduino.flushInput()                # Remove old inputs
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
inputLine = SerialIOArduino.readline().strip() # read a '\n' terminated line()
# Send the csv string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
|
Send the messages from the serial port as UDP messages to port 9050
Receives UDP messages on port 9050 and sends them over the serial lineimport serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
data, addr = sock.recvfrom(1280) # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT))            # Listen on all adapters
SerialIOArduino = serial.Serial(port,9600) # setup the serial port and baudrate
SerialIOArduino.flushInput()                # Remove old inputs
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
inputLine = SerialIOArduino.readline().strip() # read a '\n' terminated line()
# Send the csv string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
|
<commit_before><commit_msg>Send the messages from the serial port as UDP messages to port 9050
Receives UDP messages on port 9050 and sends them over the serial line<commit_after>import serial #Serial port API http://pyserial.sourceforge.net/pyserial_api.html
import socket
import time
from threading import Thread
def recvUDP(sock,SerialIOArduino):
while True:
data, addr = sock.recvfrom(1280) # Max receive size is 1280 bytes
print "UDP received message:", data.strip()
SerialIOArduino.write(data)
port = "/dev/ttyACM0"
UDP_IP = "127.0.0.1"
UDP_PORT = 9050
print "UDP target IP:", UDP_IP
print "UDP target port:", UDP_PORT
sock = socket.socket(socket.AF_INET, # Internet protocol
socket.SOCK_DGRAM) # User Datagram (UDP)
sock.bind(("0.0.0.0", UDP_PORT))            # Listen on all adapters
SerialIOArduino = serial.Serial(port,9600) # setup the serial port and baudrate
SerialIOArduino.flushInput()                # Remove old inputs
t = Thread(target=recvUDP,args=(sock,SerialIOArduino,))
t.daemon=True # Stop thread when program ends
t.start()
while True:
if (SerialIOArduino.inWaiting() > 0):
inputLine = SerialIOArduino.readline().strip() # read a '\n' terminated line()
# Send the csv string as a UDP message
sock.sendto(inputLine, (UDP_IP, UDP_PORT))
|
|
a60e7b34a5c2f0a80f30ae7fa61efe507cd66161
|
tests.py
|
tests.py
|
# -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
def setUp(self):
temp = tempfile.mkstemp()
self.temp = temp
self.db_fd = temp[0]
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{}'.format(temp[1])
app.config['TESTING'] = True
self.app = app.test_client()
SQLAlchemy.create_all(db)
def tearDown(self):
os.close(self.db_fd)
os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))
def test_add_pass_and_registrations(self):
data = {
'foo': 57,
'bar': str(datetime.utcnow()),
'baz': 'Lorem ipsum dolar sit amet'
}
p = Pass('com.company.pass.example', 'ABC123', data)
db.session.add(p)
db.session.commit()
assert Pass.query.get(1)
r = Registration('123456789', '00000000 00000000 00000000 00000000 \
00000000 00000000 00000000 00000000', p)
db.session.add(r)
db.session.commit()
assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|
Add a basic test case.
|
Add a basic test case.
|
Python
|
mit
|
renstrom/passbook_flask_example
|
Add a basic test case.
|
# -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
def setUp(self):
temp = tempfile.mkstemp()
self.temp = temp
self.db_fd = temp[0]
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{}'.format(temp[1])
app.config['TESTING'] = True
self.app = app.test_client()
SQLAlchemy.create_all(db)
def tearDown(self):
os.close(self.db_fd)
os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))
def test_add_pass_and_registrations(self):
data = {
'foo': 57,
'bar': str(datetime.utcnow()),
'baz': 'Lorem ipsum dolar sit amet'
}
p = Pass('com.company.pass.example', 'ABC123', data)
db.session.add(p)
db.session.commit()
assert Pass.query.get(1)
r = Registration('123456789', '00000000 00000000 00000000 00000000 \
00000000 00000000 00000000 00000000', p)
db.session.add(r)
db.session.commit()
assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a basic test case.<commit_after>
|
# -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
def setUp(self):
temp = tempfile.mkstemp()
self.temp = temp
self.db_fd = temp[0]
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{}'.format(temp[1])
app.config['TESTING'] = True
self.app = app.test_client()
SQLAlchemy.create_all(db)
def tearDown(self):
os.close(self.db_fd)
os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))
def test_add_pass_and_registrations(self):
data = {
'foo': 57,
'bar': str(datetime.utcnow()),
'baz': 'Lorem ipsum dolar sit amet'
}
p = Pass('com.company.pass.example', 'ABC123', data)
db.session.add(p)
db.session.commit()
assert Pass.query.get(1)
r = Registration('123456789', '00000000 00000000 00000000 00000000 \
00000000 00000000 00000000 00000000', p)
db.session.add(r)
db.session.commit()
assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|
Add a basic test case.# -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
def setUp(self):
temp = tempfile.mkstemp()
self.temp = temp
self.db_fd = temp[0]
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{}'.format(temp[1])
app.config['TESTING'] = True
self.app = app.test_client()
SQLAlchemy.create_all(db)
def tearDown(self):
os.close(self.db_fd)
os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))
def test_add_pass_and_registrations(self):
data = {
'foo': 57,
'bar': str(datetime.utcnow()),
'baz': 'Lorem ipsum dolar sit amet'
}
p = Pass('com.company.pass.example', 'ABC123', data)
db.session.add(p)
db.session.commit()
assert Pass.query.get(1)
r = Registration('123456789', '00000000 00000000 00000000 00000000 \
00000000 00000000 00000000 00000000', p)
db.session.add(r)
db.session.commit()
assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add a basic test case.<commit_after># -*- coding: utf-8 -*-
import os
import unittest
import tempfile
from datetime import datetime
from flask.ext.sqlalchemy import SQLAlchemy
from app import app, db, Pass, Registration
class PassbookTestCase(unittest.TestCase):
def setUp(self):
temp = tempfile.mkstemp()
self.temp = temp
self.db_fd = temp[0]
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///{}'.format(temp[1])
app.config['TESTING'] = True
self.app = app.test_client()
SQLAlchemy.create_all(db)
def tearDown(self):
os.close(self.db_fd)
os.unlink(app.config['SQLALCHEMY_DATABASE_URI'].replace('sqlite:///', ''))
def test_add_pass_and_registrations(self):
data = {
'foo': 57,
'bar': str(datetime.utcnow()),
'baz': 'Lorem ipsum dolar sit amet'
}
p = Pass('com.company.pass.example', 'ABC123', data)
db.session.add(p)
db.session.commit()
assert Pass.query.get(1)
r = Registration('123456789', '00000000 00000000 00000000 00000000 \
00000000 00000000 00000000 00000000', p)
db.session.add(r)
db.session.commit()
assert Registration.query.get(1)
if __name__ == '__main__':
unittest.main()
|