Dataset columns, as summarized in the dataset viewer:

| Column | Type | Range |
|---|---|---|
| commit | stringlengths | 40-40 |
| old_file | stringlengths | 4-118 |
| new_file | stringlengths | 4-118 |
| old_contents | stringlengths | 0-2.94k |
| new_contents | stringlengths | 1-4.43k |
| subject | stringlengths | 15-444 |
| message | stringlengths | 16-3.45k |
| lang | stringclasses | 1 value |
| license | stringclasses | 13 values |
| repos | stringlengths | 5-43.2k |
| prompt | stringlengths | 17-4.58k |
| response | stringlengths | 1-4.43k |
| prompt_tagged | stringlengths | 58-4.62k |
| response_tagged | stringlengths | 1-4.43k |
| text | stringlengths | 132-7.29k |
| text_tagged | stringlengths | 173-7.33k |
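For orientation, here is a minimal sketch of how records with this schema could be read using the Hugging Face `datasets` library. The repository id below is a placeholder (the actual dataset name is not given on this page), so treat it as an assumption rather than the real identifier; the field names come from the column table above.

```python
from datasets import load_dataset

# Hypothetical repository id -- replace with the real dataset name.
ds = load_dataset("org/commit-message-dataset", split="train")

row = ds[0]
print(row["subject"])       # one-line commit subject
print(row["message"])       # full commit message
print(row["new_contents"])  # file contents after the commit
```

The sample records that follow list each field's value in the column order above, with `|` lines separating the fields.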
adb6ce275e1cbc2d000286e169a4a96b25b32dbb
|
test_doc.py
|
test_doc.py
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
    print("=== Test file: README ===")
    failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
    if failure:
        sys.exit(1)
    print("=== Test file: test.rst ===")
    failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
    if failure:
        sys.exit(1)
    print("=== Test IPy module ===")
    import IPy
    failure, tests = doctest.testmod(IPy)
    if failure:
        sys.exit(1)
else:
    sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
    total_failures, total_tests = (0, 0)
    print("=== Test file: README ===")
    failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
    total_failures += failure
    total_tests += tests
    print("=== Test file: test.rst ===")
    failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
    total_failures += failure
    total_tests += tests
    print("=== Test IPy module ===")
    import IPy
    failure, tests = doctest.testmod(IPy)
    total_failures += failure
    total_tests += tests
    print("=== Overall Results ===")
    print("total tests %d, failures %d" % (total_tests, total_failures))
    if total_failures:
        sys.exit(1)
else:
    sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
Allow doctest runner to keep going after failures
|
Allow doctest runner to keep going after failures
It will still return an error code, but there is little need to halt the
running of the three different doctest modules if an early one fails,
which may in fact mask the real reason for failure in an IPy internal
method.
Signed-off-by: Dan McGee <a6e5737275ff1276377ee261739f3ee963671241@gmail.com>
|
Python
|
bsd-3-clause
|
dstam/python-ipy,sigma-random/python-ipy
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
if failure:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
Allow doctest runner to keep going after failures
It will still return an error code, but there is little need to halt the
running of the three different doctest modules if an early one fails,
which may in fact mask the real reason for failure in an IPy internal
method.
Signed-off-by: Dan McGee <a6e5737275ff1276377ee261739f3ee963671241@gmail.com>
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
total_failures, total_tests = (0, 0)
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
total_failures += failure
total_tests += tests
print("=== Overall Results ===")
print("total tests %d, failures %d" % (total_tests, total_failures))
if total_failures:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
<commit_before>#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
if failure:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
<commit_msg>Allow doctest runner to keep going after failures
It will still return an error code, but there is little need to halt the
running of the three different doctest modules if an early one fails,
which may in fact mask the real reason for failure in an IPy internal
method.
Signed-off-by: Dan McGee <a6e5737275ff1276377ee261739f3ee963671241@gmail.com><commit_after>
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
total_failures, total_tests = (0, 0)
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
total_failures += failure
total_tests += tests
print("=== Overall Results ===")
print("total tests %d, failures %d" % (total_tests, total_failures))
if total_failures:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
if failure:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
Allow doctest runner to keep going after failures
It will still return an error code, but there is little need to halt the
running of the three different doctest modules if an early one fails,
which may in fact mask the real reason for failure in an IPy internal
method.
Signed-off-by: Dan McGee <a6e5737275ff1276377ee261739f3ee963671241@gmail.com>#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
total_failures, total_tests = (0, 0)
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
total_failures += failure
total_tests += tests
print("=== Overall Results ===")
print("total tests %d, failures %d" % (total_tests, total_failures))
if total_failures:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
<commit_before>#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
if failure:
sys.exit(1)
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
if failure:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
<commit_msg>Allow doctest runner to keep going after failures
It will still return an error code, but there is little need to halt the
running of the three different doctest modules if an early one fails,
which may in fact mask the real reason for failure in an IPy internal
method.
Signed-off-by: Dan McGee <a6e5737275ff1276377ee261739f3ee963671241@gmail.com><commit_after>#!/usr/bin/env python
import doctest
import sys
if hasattr(doctest, "testfile"):
total_failures, total_tests = (0, 0)
print("=== Test file: README ===")
failure, tests = doctest.testfile('README', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test file: test.rst ===")
failure, tests = doctest.testfile('test/test.rst', optionflags=doctest.ELLIPSIS)
total_failures += failure
total_tests += tests
print("=== Test IPy module ===")
import IPy
failure, tests = doctest.testmod(IPy)
total_failures += failure
total_tests += tests
print("=== Overall Results ===")
print("total tests %d, failures %d" % (total_tests, total_failures))
if total_failures:
sys.exit(1)
else:
sys.stderr.write("WARNING: doctest has no function testfile (before Python 2.4), unable to check README\n")
|
4318c0275b1c90fc65aff5ce59eaf101b78fba31
|
scripts/award_badge_to_user.py
|
scripts/award_badge_to_user.py
|
#!/usr/bin/env python
"""Award a badge to a user.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.user_badge import service as badge_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.validators import validate_user_id
from bootstrap.util import app_context
@click.command()
@click.argument('badge_slug')
@click.argument('user', callback=validate_user_id)
def execute(badge_slug, user):
    badge = badge_service.find_badge_by_slug(badge_slug)
    if badge is None:
        raise click.BadParameter('Unknown badge slug "{}".'.format(badge_slug))
    click.echo('Awarding badge "{}" to user "{}" ... '
               .format(badge.label, user.screen_name), nl=False)
    badge_service.award_badge_to_user(badge.id, user.id)
    click.secho('done.', fg='green')
if __name__ == '__main__':
    config_filename = get_config_filename_from_env_or_exit()
    with app_context(config_filename):
        execute()
|
Add script to award a badge to a user
|
Add script to award a badge to a user
|
Python
|
bsd-3-clause
|
m-ober/byceps,homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps,m-ober/byceps,m-ober/byceps
|
Add script to award a badge to a user
|
#!/usr/bin/env python
"""Award a badge to a user.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.user_badge import service as badge_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.validators import validate_user_id
from bootstrap.util import app_context
@click.command()
@click.argument('badge_slug')
@click.argument('user', callback=validate_user_id)
def execute(badge_slug, user):
badge = badge_service.find_badge_by_slug(badge_slug)
if badge is None:
raise click.BadParameter('Unknown badge slug "{}".'.format(badge_slug))
click.echo('Awarding badge "{}" to user "{}" ... '
.format(badge.label, user.screen_name), nl=False)
badge_service.award_badge_to_user(badge.id, user.id)
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to award a badge to a user<commit_after>
|
#!/usr/bin/env python
"""Award a badge to a user.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.user_badge import service as badge_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.validators import validate_user_id
from bootstrap.util import app_context
@click.command()
@click.argument('badge_slug')
@click.argument('user', callback=validate_user_id)
def execute(badge_slug, user):
badge = badge_service.find_badge_by_slug(badge_slug)
if badge is None:
raise click.BadParameter('Unknown badge slug "{}".'.format(badge_slug))
click.echo('Awarding badge "{}" to user "{}" ... '
.format(badge.label, user.screen_name), nl=False)
badge_service.award_badge_to_user(badge.id, user.id)
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
Add script to award a badge to a user#!/usr/bin/env python
"""Award a badge to a user.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.user_badge import service as badge_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.validators import validate_user_id
from bootstrap.util import app_context
@click.command()
@click.argument('badge_slug')
@click.argument('user', callback=validate_user_id)
def execute(badge_slug, user):
badge = badge_service.find_badge_by_slug(badge_slug)
if badge is None:
raise click.BadParameter('Unknown badge slug "{}".'.format(badge_slug))
click.echo('Awarding badge "{}" to user "{}" ... '
.format(badge.label, user.screen_name), nl=False)
badge_service.award_badge_to_user(badge.id, user.id)
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
<commit_before><commit_msg>Add script to award a badge to a user<commit_after>#!/usr/bin/env python
"""Award a badge to a user.
:Copyright: 2006-2018 Jochen Kupperschmidt
:License: Modified BSD, see LICENSE for details.
"""
import click
from byceps.services.user_badge import service as badge_service
from byceps.util.system import get_config_filename_from_env_or_exit
from bootstrap.validators import validate_user_id
from bootstrap.util import app_context
@click.command()
@click.argument('badge_slug')
@click.argument('user', callback=validate_user_id)
def execute(badge_slug, user):
badge = badge_service.find_badge_by_slug(badge_slug)
if badge is None:
raise click.BadParameter('Unknown badge slug "{}".'.format(badge_slug))
click.echo('Awarding badge "{}" to user "{}" ... '
.format(badge.label, user.screen_name), nl=False)
badge_service.award_badge_to_user(badge.id, user.id)
click.secho('done.', fg='green')
if __name__ == '__main__':
config_filename = get_config_filename_from_env_or_exit()
with app_context(config_filename):
execute()
|
|
7a98fd7f4f5a3d29aa9b8b182e19d7c8b16bcc4a
|
symposion/reviews/migrations/0002_migrate_messages_to_feedback.py
|
symposion/reviews/migrations/0002_migrate_messages_to_feedback.py
|
# Generated by Django 2.0.13 on 2019-05-13 22:09
from django.db import migrations
def migrate_messages_to_feedback(apps, schema_editor):
    """
    Migrate symposion.reviews.models.ProposalMessage to
    conf_site.reviews.models.ProposalFeedback.
    """
    ProposalMessage = apps.get_model("symposion_reviews", "ProposalMessage")
    ProposalFeedback = apps.get_model("reviews", "ProposalFeedback")
    # There are unlikely to be so many ProposalMessage objects
    # that it would make sense to use `.iterator()` here.
    for message in ProposalMessage.objects.all():
        ProposalFeedback.objects.create(
            proposal=message.proposal.proposal,
            author=message.user,
            comment=message.message,
            comment_html=message.message_html,
        )
class Migration(migrations.Migration):
    dependencies = [("symposion_reviews", "0001_initial")]
    operations = [migrations.RunPython(migrate_messages_to_feedback)]
|
Migrate old proposal messages to new feedback.
|
Migrate old proposal messages to new feedback.
Migrate `symposion.reviews.models.ProposalMessage` objects to
`conf_site.reviews.models.ProposalFeedback` objects.
|
Python
|
mit
|
pydata/conf_site,pydata/conf_site,pydata/conf_site
|
Migrate old proposal messages to new feedback.
Migrate `symposion.reviews.models.ProposalMessage` objects to
`conf_site.reviews.models.ProposalFeedback` objects.
|
# Generated by Django 2.0.13 on 2019-05-13 22:09
from django.db import migrations
def migrate_messages_to_feedback(apps, schema_editor):
"""
Migrate symposion.reviews.models.ProposalMessage to
conf_site.reviews.models.ProposalFeedback.
"""
ProposalMessage = apps.get_model("symposion_reviews", "ProposalMessage")
ProposalFeedback = apps.get_model("reviews", "ProposalFeedback")
# There are unlikely to be so many ProposalMessage objects
# that it would make sense to use `.iterator()` here.
for message in ProposalMessage.objects.all():
ProposalFeedback.objects.create(
proposal=message.proposal.proposal,
author=message.user,
comment=message.message,
comment_html=message.message_html,
)
class Migration(migrations.Migration):
dependencies = [("symposion_reviews", "0001_initial")]
operations = [migrations.RunPython(migrate_messages_to_feedback)]
|
<commit_before><commit_msg>Migrate old proposal messages to new feedback.
Migrate `symposion.reviews.models.ProposalMessage` objects to
`conf_site.reviews.models.ProposalFeedback` objects.<commit_after>
|
# Generated by Django 2.0.13 on 2019-05-13 22:09
from django.db import migrations
def migrate_messages_to_feedback(apps, schema_editor):
"""
Migrate symposion.reviews.models.ProposalMessage to
conf_site.reviews.models.ProposalFeedback.
"""
ProposalMessage = apps.get_model("symposion_reviews", "ProposalMessage")
ProposalFeedback = apps.get_model("reviews", "ProposalFeedback")
# There are unlikely to be so many ProposalMessage objects
# that it would make sense to use `.iterator()` here.
for message in ProposalMessage.objects.all():
ProposalFeedback.objects.create(
proposal=message.proposal.proposal,
author=message.user,
comment=message.message,
comment_html=message.message_html,
)
class Migration(migrations.Migration):
dependencies = [("symposion_reviews", "0001_initial")]
operations = [migrations.RunPython(migrate_messages_to_feedback)]
|
Migrate old proposal messages to new feedback.
Migrate `symposion.reviews.models.ProposalMessage` objects to
`conf_site.reviews.models.ProposalFeedback` objects.# Generated by Django 2.0.13 on 2019-05-13 22:09
from django.db import migrations
def migrate_messages_to_feedback(apps, schema_editor):
"""
Migrate symposion.reviews.models.ProposalMessage to
conf_site.reviews.models.ProposalFeedback.
"""
ProposalMessage = apps.get_model("symposion_reviews", "ProposalMessage")
ProposalFeedback = apps.get_model("reviews", "ProposalFeedback")
# There are unlikely to be so many ProposalMessage objects
# that it would make sense to use `.iterator()` here.
for message in ProposalMessage.objects.all():
ProposalFeedback.objects.create(
proposal=message.proposal.proposal,
author=message.user,
comment=message.message,
comment_html=message.message_html,
)
class Migration(migrations.Migration):
dependencies = [("symposion_reviews", "0001_initial")]
operations = [migrations.RunPython(migrate_messages_to_feedback)]
|
<commit_before><commit_msg>Migrate old proposal messages to new feedback.
Migrate `symposion.reviews.models.ProposalMessage` objects to
`conf_site.reviews.models.ProposalFeedback` objects.<commit_after># Generated by Django 2.0.13 on 2019-05-13 22:09
from django.db import migrations
def migrate_messages_to_feedback(apps, schema_editor):
"""
Migrate symposion.reviews.models.ProposalMessage to
conf_site.reviews.models.ProposalFeedback.
"""
ProposalMessage = apps.get_model("symposion_reviews", "ProposalMessage")
ProposalFeedback = apps.get_model("reviews", "ProposalFeedback")
# There are unlikely to be so many ProposalMessage objects
# that it would make sense to use `.iterator()` here.
for message in ProposalMessage.objects.all():
ProposalFeedback.objects.create(
proposal=message.proposal.proposal,
author=message.user,
comment=message.message,
comment_html=message.message_html,
)
class Migration(migrations.Migration):
dependencies = [("symposion_reviews", "0001_initial")]
operations = [migrations.RunPython(migrate_messages_to_feedback)]
|
|
35f3aa0407b68210abe2e46648ae3a39ee21b17b
|
senlin/tests/tempest/api/receivers/test_receiver_show.py
|
senlin/tests/tempest/api/receivers/test_receiver_show.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverShow(base.BaseSenlinTest):
    @classmethod
    def resource_setup(cls):
        super(TestReceiverShow, cls).resource_setup()
        # Create profile
        cls.profile = cls.create_profile(constants.spec_nova_server)
        # Create a test cluster
        cls.cluster = cls.create_test_cluster(cls.profile['id'],
                                              0, min_size=0, max_size=-1)
        # Create receiver
        cls.receiver = cls.create_receiver(cls.cluster['id'],
                                           'CLUSTER_RESIZE', 'webhook')
    @classmethod
    def resource_cleanup(cls):
        # Delete receiver
        cls.client.delete_obj('receivers', cls.receiver['id'])
        # Delete test cluster
        cls.delete_test_cluster(cls.cluster['id'])
        # Delete profile
        cls.delete_profile(cls.profile['id'])
        super(TestReceiverShow, cls).resource_cleanup()
    @decorators.idempotent_id('6a86b2e4-127a-4acc-b0ec-6f951b240e5b')
    def test_show_receiver(self):
        res = self.client.get_obj('receivers', self.receiver['id'])
        self.assertEqual(200, res['status'])
        self.assertIsNone(res['location'])
        self.assertIsNotNone(res['body'])
        receiver = res['body']
        for key in ['action', 'actor', 'channel', 'cluster_id',
                    'created_at', 'domain', 'id', 'name', 'params',
                    'project', 'type', 'updated_at', 'user']:
            self.assertIn(key, receiver)
|
Add API test for receiver show
|
Add API test for receiver show
Add API test for receiver show
Change-Id: I6d01189cdba772c1c085549a4a9838d43ce39a73
|
Python
|
apache-2.0
|
openstack/senlin,openstack/senlin,stackforge/senlin,openstack/senlin,stackforge/senlin
|
Add API test for receiver show
Add API test for receiver show
Change-Id: I6d01189cdba772c1c085549a4a9838d43ce39a73
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverShow, cls).resource_cleanup()
@decorators.idempotent_id('6a86b2e4-127a-4acc-b0ec-6f951b240e5b')
def test_show_receiver(self):
res = self.client.get_obj('receivers', self.receiver['id'])
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receiver = res['body']
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
|
<commit_before><commit_msg>Add API test for receiver show
Add API test for receiver show
Change-Id: I6d01189cdba772c1c085549a4a9838d43ce39a73<commit_after>
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverShow, cls).resource_cleanup()
@decorators.idempotent_id('6a86b2e4-127a-4acc-b0ec-6f951b240e5b')
def test_show_receiver(self):
res = self.client.get_obj('receivers', self.receiver['id'])
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receiver = res['body']
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
|
Add API test for receiver show
Add API test for receiver show
Change-Id: I6d01189cdba772c1c085549a4a9838d43ce39a73# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverShow, cls).resource_cleanup()
@decorators.idempotent_id('6a86b2e4-127a-4acc-b0ec-6f951b240e5b')
def test_show_receiver(self):
res = self.client.get_obj('receivers', self.receiver['id'])
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receiver = res['body']
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
|
<commit_before><commit_msg>Add API test for receiver show
Add API test for receiver show
Change-Id: I6d01189cdba772c1c085549a4a9838d43ce39a73<commit_after># Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib import decorators
from senlin.tests.tempest.api import base
from senlin.tests.tempest.common import constants
class TestReceiverShow(base.BaseSenlinTest):
@classmethod
def resource_setup(cls):
super(TestReceiverShow, cls).resource_setup()
# Create profile
cls.profile = cls.create_profile(constants.spec_nova_server)
# Create a test cluster
cls.cluster = cls.create_test_cluster(cls.profile['id'],
0, min_size=0, max_size=-1)
# Create receiver
cls.receiver = cls.create_receiver(cls.cluster['id'],
'CLUSTER_RESIZE', 'webhook')
@classmethod
def resource_cleanup(cls):
# Delete receiver
cls.client.delete_obj('receivers', cls.receiver['id'])
# Delete test cluster
cls.delete_test_cluster(cls.cluster['id'])
# Delete profile
cls.delete_profile(cls.profile['id'])
super(TestReceiverShow, cls).resource_cleanup()
@decorators.idempotent_id('6a86b2e4-127a-4acc-b0ec-6f951b240e5b')
def test_show_receiver(self):
res = self.client.get_obj('receivers', self.receiver['id'])
self.assertEqual(200, res['status'])
self.assertIsNone(res['location'])
self.assertIsNotNone(res['body'])
receiver = res['body']
for key in ['action', 'actor', 'channel', 'cluster_id',
'created_at', 'domain', 'id', 'name', 'params',
'project', 'type', 'updated_at', 'user']:
self.assertIn(key, receiver)
|
|
7c5ec81cd0775aaa47b004d275eac6496dd53297
|
__init__.py
|
__init__.py
|
import os
import sys
def get_path(version=2014):
    '''Get the path to these shaders for stuffing into Maya envvars.'''
    return os.path.abspath(os.path.join(
        __file__,
        '..',
        'build',
        '%s-%d' % (
            'macosx' if sys.platform == 'darwin' else 'linux',
            version
        )
    ))
|
Add a hook for key_base to find this tool
|
Add a hook for key_base to find this tool
For the dev command, really.
|
Python
|
bsd-3-clause
|
westernx/ksmrshaders
|
Add a hook for key_base to find this tool
For the dev command, really.
|
import os
import sys
def get_path(version=2014):
'''Get the path to these shaders for stuffing into Maya envvars.'''
return os.path.abspath(os.path.join(
__file__,
'..',
'build',
'%s-%d' % (
'macosx' if sys.platform == 'darwin' else 'linux',
version
)
))
|
<commit_before><commit_msg>Add a hook for key_base to find this tool
For the dev command, really.<commit_after>
|
import os
import sys
def get_path(version=2014):
'''Get the path to these shaders for stuffing into Maya envvars.'''
return os.path.abspath(os.path.join(
__file__,
'..',
'build',
'%s-%d' % (
'macosx' if sys.platform == 'darwin' else 'linux',
version
)
))
|
Add a hook for key_base to find this tool
For the dev command, really.import os
import sys
def get_path(version=2014):
'''Get the path to these shaders for stuffing into Maya envvars.'''
return os.path.abspath(os.path.join(
__file__,
'..',
'build',
'%s-%d' % (
'macosx' if sys.platform == 'darwin' else 'linux',
version
)
))
|
<commit_before><commit_msg>Add a hook for key_base to find this tool
For the dev command, really.<commit_after>import os
import sys
def get_path(version=2014):
'''Get the path to these shaders for stuffing into Maya envvars.'''
return os.path.abspath(os.path.join(
__file__,
'..',
'build',
'%s-%d' % (
'macosx' if sys.platform == 'darwin' else 'linux',
version
)
))
|
|
dc7887dde6b2ca8136647051da3c8cbe8762ca65
|
samples/migrations/0014_auto_20170529_0935.py
|
samples/migrations/0014_auto_20170529_0935.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:35
from __future__ import unicode_literals
from django.db import migrations
import samples.custom.models
class Migration(migrations.Migration):
    dependencies = [
        ('samples', '0013_auto_20170526_1718'),
    ]
    operations = [
        migrations.AlterField(
            model_name='observedsymptom',
            name='observed',
            field=samples.custom.models.YesNoIgnoredField(default=None, verbose_name='Apresenta sintoma?'),
        ),
    ]
|
Add mitration file for changing ObservedSymptom field
|
:memo: Add mitration file for changing ObservedSymptom field
|
Python
|
mit
|
gems-uff/labsys,gems-uff/labsys,gems-uff/labsys
|
:memo: Add mitration file for changing ObservedSymptom field
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:35
from __future__ import unicode_literals
from django.db import migrations
import samples.custom.models
class Migration(migrations.Migration):
dependencies = [
('samples', '0013_auto_20170526_1718'),
]
operations = [
migrations.AlterField(
model_name='observedsymptom',
name='observed',
field=samples.custom.models.YesNoIgnoredField(default=None, verbose_name='Apresenta sintoma?'),
),
]
|
<commit_before><commit_msg>:memo: Add mitration file for changing ObservedSymptom field<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:35
from __future__ import unicode_literals
from django.db import migrations
import samples.custom.models
class Migration(migrations.Migration):
dependencies = [
('samples', '0013_auto_20170526_1718'),
]
operations = [
migrations.AlterField(
model_name='observedsymptom',
name='observed',
field=samples.custom.models.YesNoIgnoredField(default=None, verbose_name='Apresenta sintoma?'),
),
]
|
:memo: Add mitration file for changing ObservedSymptom field# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:35
from __future__ import unicode_literals
from django.db import migrations
import samples.custom.models
class Migration(migrations.Migration):
dependencies = [
('samples', '0013_auto_20170526_1718'),
]
operations = [
migrations.AlterField(
model_name='observedsymptom',
name='observed',
field=samples.custom.models.YesNoIgnoredField(default=None, verbose_name='Apresenta sintoma?'),
),
]
|
<commit_before><commit_msg>:memo: Add mitration file for changing ObservedSymptom field<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-05-29 12:35
from __future__ import unicode_literals
from django.db import migrations
import samples.custom.models
class Migration(migrations.Migration):
dependencies = [
('samples', '0013_auto_20170526_1718'),
]
operations = [
migrations.AlterField(
model_name='observedsymptom',
name='observed',
field=samples.custom.models.YesNoIgnoredField(default=None, verbose_name='Apresenta sintoma?'),
),
]
|
|
39ec6a5c1c42ababb44aa1863d5e441ec807f345
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Read_Entity_Position.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Read_Entity_Position.py
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Read_Entity_Position(EventState):
    '''
    Read the 3D position of an entity in a json string
    ># json_text string command to read
    #< x_pos int x position of the entity
    #< y_pos int y position of the entity
    #< z_pos int z position of the entity
    <= done return when at least one entity exist
    <= zero return when no entity have the selected name
    <= error return when error reading data
    '''
    def __init__(self):
        # See example_state.py for basic explanations.
        super(Wonderland_Read_Entity_Position, self).__init__(outcomes=['done', 'zero', 'error'], input_keys=['json_text'],
                                                               output_keys=['x_pos', 'y_pos', 'z_pos'])
    def execute(self, userdata):
        #parse parameter json data
        data = json.loads(userdata.json_text)
        #read if there is data
        if not data:
            #continue to Zero
            return 'zero'
        #try to read data
        if 'x' not in data[0]:
            #continue to Error
            return 'error'
        if 'y' not in data[0]:
            #continue to Error
            return 'error'
        if 'z' not in data[0]:
            #continue to Error
            return 'error'
        #write return datas
        userdata.x_pos = data[0]['x']
        userdata.y_pos = data[0]['y']
        userdata.z_pos = data[0]['z']
        #continue to Done
        return 'done'
|
Add a state for read an entity position in Wonderland.
|
Add a state for read an entity position in Wonderland.
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a state for read an entity position in Wonderland.
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Read_Entity_Position(EventState):
'''
Read the 3D position of an entity in a json string
># json_text string command to read
#< x_pos int x position of the entity
#< y_pos int y position of the entity
#< z_pos int z position of the entity
<= done return when at least one entity exist
<= zero return when no entity have the selected name
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Read_Entity_Position, self).__init__(outcomes=['done', 'zero', 'error'], input_keys=['json_text'],
output_keys=['x_pos', 'y_pos', 'z_pos'])
def execute(self, userdata):
#parse parameter json data
data = json.loads(userdata.json_text)
#read if there is data
if not data:
#continue to Zero
return 'zero'
#try to read data
if 'x' not in data[0]:
#continue to Error
return 'error'
if 'y' not in data[0]:
#continue to Error
return 'error'
if 'z' not in data[0]:
#continue to Error
return 'error'
#write return datas
userdata.x_pos = data[0]['x']
userdata.y_pos = data[0]['y']
userdata.z_pos = data[0]['z']
#continue to Done
return 'done'
|
<commit_before><commit_msg>Add a state for read an entity position in Wonderland.<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Read_Entity_Position(EventState):
'''
Read the 3D position of an entity in a json string
># json_text string command to read
#< x_pos int x position of the entity
#< y_pos int y position of the entity
#< z_pos int z position of the entity
<= done return when at least one entity exist
<= zero return when no entity have the selected name
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Read_Entity_Position, self).__init__(outcomes=['done', 'zero', 'error'], input_keys=['json_text'],
output_keys=['x_pos', 'y_pos', 'z_pos'])
def execute(self, userdata):
#parse parameter json data
data = json.loads(userdata.json_text)
#read if there is data
if not data:
#continue to Zero
return 'zero'
#try to read data
if 'x' not in data[0]:
#continue to Error
return 'error'
if 'y' not in data[0]:
#continue to Error
return 'error'
if 'z' not in data[0]:
#continue to Error
return 'error'
#write return datas
userdata.x_pos = data[0]['x']
userdata.y_pos = data[0]['y']
userdata.z_pos = data[0]['z']
#continue to Done
return 'done'
|
Add a state for read an entity position in Wonderland.#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Read_Entity_Position(EventState):
'''
Read the 3D position of an entity in a json string
># json_text string command to read
#< x_pos int x position of the entity
#< y_pos int y position of the entity
#< z_pos int z position of the entity
<= done return when at least one entity exist
<= zero return when no entity have the selected name
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Read_Entity_Position, self).__init__(outcomes=['done', 'zero', 'error'], input_keys=['json_text'],
output_keys=['x_pos', 'y_pos', 'z_pos'])
def execute(self, userdata):
#parse parameter json data
data = json.loads(userdata.json_text)
#read if there is data
if not data:
#continue to Zero
return 'zero'
#try to read data
if 'x' not in data[0]:
#continue to Error
return 'error'
if 'y' not in data[0]:
#continue to Error
return 'error'
if 'z' not in data[0]:
#continue to Error
return 'error'
#write return datas
userdata.x_pos = data[0]['x']
userdata.y_pos = data[0]['y']
userdata.z_pos = data[0]['z']
#continue to Done
return 'done'
|
<commit_before><commit_msg>Add a state for read an entity position in Wonderland.<commit_after>#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Read_Entity_Position(EventState):
'''
Read the 3D position of an entity in a json string
># json_text string command to read
#< x_pos int x position of the entity
#< y_pos int y position of the entity
#< z_pos int z position of the entity
<= done return when at least one entity exist
<= zero return when no entity have the selected name
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Read_Entity_Position, self).__init__(outcomes=['done', 'zero', 'error'], input_keys=['json_text'],
output_keys=['x_pos', 'y_pos', 'z_pos'])
def execute(self, userdata):
#parse parameter json data
data = json.loads(userdata.json_text)
#read if there is data
if not data:
#continue to Zero
return 'zero'
#try to read data
if 'x' not in data[0]:
#continue to Error
return 'error'
if 'y' not in data[0]:
#continue to Error
return 'error'
if 'z' not in data[0]:
#continue to Error
return 'error'
#write return datas
userdata.x_pos = data[0]['x']
userdata.y_pos = data[0]['y']
userdata.z_pos = data[0]['z']
#continue to Done
return 'done'
|
|
32c190b4ba195c164686fdef03029edda38549eb
|
velocity_step_example2sv.py
|
velocity_step_example2sv.py
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.ExternalSystem()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = rsf.DieterichState(model)
state1.b = 0.01 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
state2 = rsf.DieterichState(model)
state2.b = 0.001 # Empirical coefficient for the evolution effect
state2.Dc = 5. # Critical slip distance
model.state_relations = [state1,state2] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.model_time = np.arange(0,40.01,0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.model_time)
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
solver = rsf.RateState()
solver.solve(model)
# Make the phase plot
solver.phasePlot(model)
# Make a plot in displacement
solver.dispPlot(model)
# Make a plot in time
solver.timePlot(model)
|
Add two state variable example
|
Add two state variable example
|
Python
|
mit
|
jrleeman/rsfmodel
|
Add two state variable example
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.ExternalSystem()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = rsf.DieterichState(model)
state1.b = 0.01 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
state2 = rsf.DieterichState(model)
state2.b = 0.001 # Empirical coefficient for the evolution effect
state2.Dc = 5. # Critical slip distance
model.state_relations = [state1,state2] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.model_time = np.arange(0,40.01,0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.model_time)
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
solver = rsf.RateState()
solver.solve(model)
# Make the phase plot
solver.phasePlot(model)
# Make a plot in displacement
solver.dispPlot(model)
# Make a plot in time
solver.timePlot(model)
|
<commit_before><commit_msg>Add two state variable example<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.ExternalSystem()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = rsf.DieterichState(model)
state1.b = 0.01 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
state2 = rsf.DieterichState(model)
state2.b = 0.001 # Empirical coefficient for the evolution effect
state2.Dc = 5. # Critical slip distance
model.state_relations = [state1,state2] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.model_time = np.arange(0,40.01,0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.model_time)
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
solver = rsf.RateState()
solver.solve(model)
# Make the phase plot
solver.phasePlot(model)
# Make a plot in displacement
solver.dispPlot(model)
# Make a plot in time
solver.timePlot(model)
|
Add two state variable exampleimport numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.ExternalSystem()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = rsf.DieterichState(model)
state1.b = 0.01 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
state2 = rsf.DieterichState(model)
state2.b = 0.001 # Empirical coefficient for the evolution effect
state2.Dc = 5. # Critical slip distance
model.state_relations = [state1,state2] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.model_time = np.arange(0,40.01,0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.model_time)
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
solver = rsf.RateState()
solver.solve(model)
# Make the phase plot
solver.phasePlot(model)
# Make a plot in displacement
solver.dispPlot(model)
# Make a plot in time
solver.timePlot(model)
|
<commit_before><commit_msg>Add two state variable example<commit_after>import numpy as np
import matplotlib.pyplot as plt
import rsf
model = rsf.ExternalSystem()
# Set model initial conditions
model.mu0 = 0.6 # Friction initial (at the reference velocity)
model.a = 0.005 # Empirical coefficient for the direct effect
model.k = 1e-3 # Normalized System stiffness (friction/micron)
model.v = 1. # Initial slider velocity, generally is vlp(t=0)
model.vref = 1. # Reference velocity, generally vlp(t=0)
state1 = rsf.DieterichState(model)
state1.b = 0.01 # Empirical coefficient for the evolution effect
state1.Dc = 10. # Critical slip distance
state2 = rsf.DieterichState(model)
state2.b = 0.001 # Empirical coefficient for the evolution effect
state2.Dc = 5. # Critical slip distance
model.state_relations = [state1,state2] # Which state relation we want to use
# We want to solve for 40 seconds at 100Hz
model.model_time = np.arange(0,40.01,0.01)
# We want to slide at 1 um/s for 10 s, then at 10 um/s for 31
lp_velocity = np.ones_like(model.model_time)
lp_velocity[10*100:] = 10. # Velocity after 10 seconds is 10 um/s
# Set the model load point velocity, must be same shape as model.model_time
model.loadpoint_velocity = lp_velocity
# Run the model!
solver = rsf.RateState()
solver.solve(model)
# Make the phase plot
solver.phasePlot(model)
# Make a plot in displacement
solver.dispPlot(model)
# Make a plot in time
solver.timePlot(model)
|
|
6acc7838e250512957392fe3577f24f97c712cd2
|
tests/app/notify_client/test_status_api_client.py
|
tests/app/notify_client/test_status_api_client.py
|
from app.notify_client.status_api_client import StatusApiClient
def test_get_count_of_live_services_and_organisations(mocker):
    mocker.patch('app.extensions.RedisClient.get', return_value=None)
    client = StatusApiClient()
    mock = mocker.patch.object(client, 'get', return_value={})
    client.get_count_of_live_services_and_organisations()
    mock.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
def test_sets_value_in_cache(mocker):
    client = StatusApiClient()
    mock_redis_get = mocker.patch(
        'app.extensions.RedisClient.get',
        return_value=None
    )
    mock_api_get = mocker.patch(
        'app.notify_client.NotifyAdminAPIClient.get',
        return_value={'data_from': 'api'},
    )
    mock_redis_set = mocker.patch(
        'app.extensions.RedisClient.set',
    )
    assert client.get_count_of_live_services_and_organisations() == {'data_from': 'api'}
    mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
    mock_api_get.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
    mock_redis_set.assert_called_once_with(
        'live-service-and-organisation-counts',
        '{"data_from": "api"}',
        ex=604800
    )
def test_returns_value_from_cache(mocker):
    client = StatusApiClient()
    mock_redis_get = mocker.patch(
        'app.extensions.RedisClient.get',
        return_value=b'{"data_from": "cache"}',
    )
    mock_api_get = mocker.patch(
        'app.notify_client.NotifyAdminAPIClient.get',
    )
    mock_redis_set = mocker.patch(
        'app.extensions.RedisClient.set',
    )
    assert client.get_count_of_live_services_and_organisations() == {'data_from': 'cache'}
    mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
    assert mock_api_get.called is False
    assert mock_redis_set.called is False
|
Add tests for existing status api client
|
Add tests for existing status api client
Essentially copies the tests found in the performance_platform
api client.
|
Python
|
mit
|
alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin,alphagov/notifications-admin
|
Add tests for existing status api client
Essentially copies the tests found in the performance_platform
api client.
|
from app.notify_client.status_api_client import StatusApiClient
def test_get_count_of_live_services_and_organisations(mocker):
mocker.patch('app.extensions.RedisClient.get', return_value=None)
client = StatusApiClient()
mock = mocker.patch.object(client, 'get', return_value={})
client.get_count_of_live_services_and_organisations()
mock.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
def test_sets_value_in_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=None
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
return_value={'data_from': 'api'},
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'api'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
mock_api_get.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
mock_redis_set.assert_called_once_with(
'live-service-and-organisation-counts',
'{"data_from": "api"}',
ex=604800
)
def test_returns_value_from_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=b'{"data_from": "cache"}',
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'cache'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
assert mock_api_get.called is False
assert mock_redis_set.called is False
|
<commit_before><commit_msg>Add tests for existing status api client
Essentially copies the tests found in the performance_platform
api client.<commit_after>
|
from app.notify_client.status_api_client import StatusApiClient
def test_get_count_of_live_services_and_organisations(mocker):
mocker.patch('app.extensions.RedisClient.get', return_value=None)
client = StatusApiClient()
mock = mocker.patch.object(client, 'get', return_value={})
client.get_count_of_live_services_and_organisations()
mock.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
def test_sets_value_in_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=None
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
return_value={'data_from': 'api'},
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'api'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
mock_api_get.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
mock_redis_set.assert_called_once_with(
'live-service-and-organisation-counts',
'{"data_from": "api"}',
ex=604800
)
def test_returns_value_from_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=b'{"data_from": "cache"}',
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'cache'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
assert mock_api_get.called is False
assert mock_redis_set.called is False
|
Add tests for existing status api client
Essentially copies the tests found in the performance_platform
api client.from app.notify_client.status_api_client import StatusApiClient
def test_get_count_of_live_services_and_organisations(mocker):
mocker.patch('app.extensions.RedisClient.get', return_value=None)
client = StatusApiClient()
mock = mocker.patch.object(client, 'get', return_value={})
client.get_count_of_live_services_and_organisations()
mock.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
def test_sets_value_in_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=None
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
return_value={'data_from': 'api'},
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'api'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
mock_api_get.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
mock_redis_set.assert_called_once_with(
'live-service-and-organisation-counts',
'{"data_from": "api"}',
ex=604800
)
def test_returns_value_from_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=b'{"data_from": "cache"}',
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'cache'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
assert mock_api_get.called is False
assert mock_redis_set.called is False
|
<commit_before><commit_msg>Add tests for existing status api client
Essentially copies the tests found in the performance_platform
api client.<commit_after>from app.notify_client.status_api_client import StatusApiClient
def test_get_count_of_live_services_and_organisations(mocker):
mocker.patch('app.extensions.RedisClient.get', return_value=None)
client = StatusApiClient()
mock = mocker.patch.object(client, 'get', return_value={})
client.get_count_of_live_services_and_organisations()
mock.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
def test_sets_value_in_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=None
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
return_value={'data_from': 'api'},
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'api'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
mock_api_get.assert_called_once_with(url='/_status/live-service-and-organisation-counts')
mock_redis_set.assert_called_once_with(
'live-service-and-organisation-counts',
'{"data_from": "api"}',
ex=604800
)
def test_returns_value_from_cache(mocker):
client = StatusApiClient()
mock_redis_get = mocker.patch(
'app.extensions.RedisClient.get',
return_value=b'{"data_from": "cache"}',
)
mock_api_get = mocker.patch(
'app.notify_client.NotifyAdminAPIClient.get',
)
mock_redis_set = mocker.patch(
'app.extensions.RedisClient.set',
)
assert client.get_count_of_live_services_and_organisations() == {'data_from': 'cache'}
mock_redis_get.assert_called_once_with('live-service-and-organisation-counts')
assert mock_api_get.called is False
assert mock_redis_set.called is False
|
|
5973e21ee459dd4f659f9a3a94be48e8a0df13bc
|
py/brick-wall.py
|
py/brick-wall.py
|
from collections import Counter
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
c = Counter()
wall_width = sum(wall[0])
max_non_cut = 0
for row in wall:
subsum = 0
for n in row:
subsum += n
c[subsum] += 1
if subsum < wall_width:
max_non_cut = max(c[subsum], max_non_cut)
return len(wall) - max_non_cut
|
Add py solution for 554. Brick Wall
|
Add py solution for 554. Brick Wall
554. Brick Wall: https://leetcode.com/problems/brick-wall/
|
Python
|
apache-2.0
|
ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode
|
Add py solution for 554. Brick Wall
554. Brick Wall: https://leetcode.com/problems/brick-wall/
|
from collections import Counter
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
c = Counter()
wall_width = sum(wall[0])
max_non_cut = 0
for row in wall:
subsum = 0
for n in row:
subsum += n
c[subsum] += 1
if subsum < wall_width:
max_non_cut = max(c[subsum], max_non_cut)
return len(wall) - max_non_cut
|
<commit_before><commit_msg>Add py solution for 554. Brick Wall
554. Brick Wall: https://leetcode.com/problems/brick-wall/<commit_after>
|
from collections import Counter
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
c = Counter()
wall_width = sum(wall[0])
max_non_cut = 0
for row in wall:
subsum = 0
for n in row:
subsum += n
c[subsum] += 1
if subsum < wall_width:
max_non_cut = max(c[subsum], max_non_cut)
return len(wall) - max_non_cut
|
Add py solution for 554. Brick Wall
554. Brick Wall: https://leetcode.com/problems/brick-wall/from collections import Counter
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
c = Counter()
wall_width = sum(wall[0])
max_non_cut = 0
for row in wall:
subsum = 0
for n in row:
subsum += n
c[subsum] += 1
if subsum < wall_width:
max_non_cut = max(c[subsum], max_non_cut)
return len(wall) - max_non_cut
|
<commit_before><commit_msg>Add py solution for 554. Brick Wall
554. Brick Wall: https://leetcode.com/problems/brick-wall/<commit_after>from collections import Counter
class Solution(object):
def leastBricks(self, wall):
"""
:type wall: List[List[int]]
:rtype: int
"""
c = Counter()
wall_width = sum(wall[0])
max_non_cut = 0
for row in wall:
subsum = 0
for n in row:
subsum += n
c[subsum] += 1
if subsum < wall_width:
max_non_cut = max(c[subsum], max_non_cut)
return len(wall) - max_non_cut
|
|
f0251c2638f1242655b514eb4d8fddf16c655ad0
|
test/test_data_uri.py
|
test/test_data_uri.py
|
import os
import unittest
from node import data_uri
TEST_TXT_FILE = "data_uri_test.txt"
TEST_CONTENTS = "foo\n"
class TestDataURI(unittest.TestCase):
def setUp(self):
with open(TEST_TXT_FILE, 'w') as f:
f.write(TEST_CONTENTS)
def tearDown(self):
os.remove(TEST_TXT_FILE)
def test_init_from_file(self):
uri = data_uri.DataURI.from_file(TEST_TXT_FILE, base64=False)
self.assertEqual(uri.data, TEST_CONTENTS)
self.assertEqual(uri.mimetype, "text/plain")
self.assertFalse(uri.is_base64)
def test_init_from_args(self):
data = "I like trains"
charset = 'us-ascii'
mime = 'text/plain'
uri = data_uri.DataURI.make(
mime,
charset,
base64=True,
data=data)
self.assertEqual(uri.data, data)
self.assertEqual(uri.charset, charset)
self.assertEqual(uri.mimetype, mime)
self.assertTrue(uri.is_base64)
if __name__ == '__main__':
unittest.main()
|
Add tests for data_uri module
|
Add tests for data_uri module
Address reviewer comments
|
Python
|
mit
|
bglassy/OpenBazaar,bankonme/OpenBazaar,bankonme/OpenBazaar,atsuyim/OpenBazaar,rllola/OpenBazaar,NolanZhao/OpenBazaar,mirrax/OpenBazaar,bglassy/OpenBazaar,matiasbastos/OpenBazaar,habibmasuro/OpenBazaar,NolanZhao/OpenBazaar,saltduck/OpenBazaar,akhavr/OpenBazaar,mirrax/OpenBazaar,matiasbastos/OpenBazaar,NolanZhao/OpenBazaar,bankonme/OpenBazaar,Renelvon/OpenBazaar,dionyziz/OpenBazaar,habibmasuro/OpenBazaar,Renelvon/OpenBazaar,habibmasuro/OpenBazaar,dionyziz/OpenBazaar,matiasbastos/OpenBazaar,saltduck/OpenBazaar,must-/OpenBazaar,bglassy/OpenBazaar,atsuyim/OpenBazaar,bglassy/OpenBazaar,freebazaar/FreeBazaar,tortxof/OpenBazaar,rllola/OpenBazaar,dionyziz/OpenBazaar,habibmasuro/OpenBazaar,atsuyim/OpenBazaar,im0rtel/OpenBazaar,akhavr/OpenBazaar,mirrax/OpenBazaar,im0rtel/OpenBazaar,tortxof/OpenBazaar,freebazaar/FreeBazaar,blakejakopovic/OpenBazaar,atsuyim/OpenBazaar,tortxof/OpenBazaar,Renelvon/OpenBazaar,blakejakopovic/OpenBazaar,dionyziz/OpenBazaar,akhavr/OpenBazaar,must-/OpenBazaar,bankonme/OpenBazaar,rllola/OpenBazaar,NolanZhao/OpenBazaar,saltduck/OpenBazaar,mirrax/OpenBazaar,saltduck/OpenBazaar,dionyziz/OpenBazaar,blakejakopovic/OpenBazaar,must-/OpenBazaar,freebazaar/FreeBazaar,matiasbastos/OpenBazaar,freebazaar/FreeBazaar,must-/OpenBazaar,akhavr/OpenBazaar,blakejakopovic/OpenBazaar,tortxof/OpenBazaar,Renelvon/OpenBazaar,rllola/OpenBazaar,im0rtel/OpenBazaar,akhavr/OpenBazaar,im0rtel/OpenBazaar,freebazaar/FreeBazaar
|
Add tests for data_uri module
Address reviewer comments
|
import os
import unittest
from node import data_uri
TEST_TXT_FILE = "data_uri_test.txt"
TEST_CONTENTS = "foo\n"
class TestDataURI(unittest.TestCase):
def setUp(self):
with open(TEST_TXT_FILE, 'w') as f:
f.write(TEST_CONTENTS)
def tearDown(self):
os.remove(TEST_TXT_FILE)
def test_init_from_file(self):
uri = data_uri.DataURI.from_file(TEST_TXT_FILE, base64=False)
self.assertEqual(uri.data, TEST_CONTENTS)
self.assertEqual(uri.mimetype, "text/plain")
self.assertFalse(uri.is_base64)
def test_init_from_args(self):
data = "I like trains"
charset = 'us-ascii'
mime = 'text/plain'
uri = data_uri.DataURI.make(
mime,
charset,
base64=True,
data=data)
self.assertEqual(uri.data, data)
self.assertEqual(uri.charset, charset)
self.assertEqual(uri.mimetype, mime)
self.assertTrue(uri.is_base64)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for data_uri module
Address reviewer comments<commit_after>
|
import os
import unittest
from node import data_uri
TEST_TXT_FILE = "data_uri_test.txt"
TEST_CONTENTS = "foo\n"
class TestDataURI(unittest.TestCase):
def setUp(self):
with open(TEST_TXT_FILE, 'w') as f:
f.write(TEST_CONTENTS)
def tearDown(self):
os.remove(TEST_TXT_FILE)
def test_init_from_file(self):
uri = data_uri.DataURI.from_file(TEST_TXT_FILE, base64=False)
self.assertEqual(uri.data, TEST_CONTENTS)
self.assertEqual(uri.mimetype, "text/plain")
self.assertFalse(uri.is_base64)
def test_init_from_args(self):
data = "I like trains"
charset = 'us-ascii'
mime = 'text/plain'
uri = data_uri.DataURI.make(
mime,
charset,
base64=True,
data=data)
self.assertEqual(uri.data, data)
self.assertEqual(uri.charset, charset)
self.assertEqual(uri.mimetype, mime)
self.assertTrue(uri.is_base64)
if __name__ == '__main__':
unittest.main()
|
Add tests for data_uri module
Address reviewer commentsimport os
import unittest
from node import data_uri
TEST_TXT_FILE = "data_uri_test.txt"
TEST_CONTENTS = "foo\n"
class TestDataURI(unittest.TestCase):
def setUp(self):
with open(TEST_TXT_FILE, 'w') as f:
f.write(TEST_CONTENTS)
def tearDown(self):
os.remove(TEST_TXT_FILE)
def test_init_from_file(self):
uri = data_uri.DataURI.from_file(TEST_TXT_FILE, base64=False)
self.assertEqual(uri.data, TEST_CONTENTS)
self.assertEqual(uri.mimetype, "text/plain")
self.assertFalse(uri.is_base64)
def test_init_from_args(self):
data = "I like trains"
charset = 'us-ascii'
mime = 'text/plain'
uri = data_uri.DataURI.make(
mime,
charset,
base64=True,
data=data)
self.assertEqual(uri.data, data)
self.assertEqual(uri.charset, charset)
self.assertEqual(uri.mimetype, mime)
self.assertTrue(uri.is_base64)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add tests for data_uri module
Address reviewer comments<commit_after>import os
import unittest
from node import data_uri
TEST_TXT_FILE = "data_uri_test.txt"
TEST_CONTENTS = "foo\n"
class TestDataURI(unittest.TestCase):
def setUp(self):
with open(TEST_TXT_FILE, 'w') as f:
f.write(TEST_CONTENTS)
def tearDown(self):
os.remove(TEST_TXT_FILE)
def test_init_from_file(self):
uri = data_uri.DataURI.from_file(TEST_TXT_FILE, base64=False)
self.assertEqual(uri.data, TEST_CONTENTS)
self.assertEqual(uri.mimetype, "text/plain")
self.assertFalse(uri.is_base64)
def test_init_from_args(self):
data = "I like trains"
charset = 'us-ascii'
mime = 'text/plain'
uri = data_uri.DataURI.make(
mime,
charset,
base64=True,
data=data)
self.assertEqual(uri.data, data)
self.assertEqual(uri.charset, charset)
self.assertEqual(uri.mimetype, mime)
self.assertTrue(uri.is_base64)
if __name__ == '__main__':
unittest.main()
|
|
455627ee963f1e61f1e9986fa00fa62318d8c179
|
test/test_issuexxx.py
|
test/test_issuexxx.py
|
from rdflib import Graph, Namespace, URIRef, Literal
from rdflib.compare import to_isomorphic
import unittest
class TestIssueXXXX(unittest.TestCase):
def test_issuexxxx(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
bob = URIRef("http://example.org/object/Bob")
value = Literal(float("inf"))
# g1 is a simple graph with one attribute having an infinite value
g1 = Graph()
g1.add((bob, PROV.value, value))
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
g2.parse(data=g1.serialize(format='turtle'), format='turtle')
self.assertTrue(g1.serialize(
format='turtle') == g2.serialize(format='turtle'))
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
if __name__ == "__main__":
unittest.main()
|
Test serialisation of infinite values
|
Test serialisation of infinite values
|
Python
|
bsd-3-clause
|
RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib,RDFLib/rdflib
|
Test serialisation of infinite values
|
from rdflib import Graph, Namespace, URIRef, Literal
from rdflib.compare import to_isomorphic
import unittest
class TestIssueXXXX(unittest.TestCase):
def test_issuexxxx(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
bob = URIRef("http://example.org/object/Bob")
value = Literal(float("inf"))
# g1 is a simple graph with one attribute having an infinite value
g1 = Graph()
g1.add((bob, PROV.value, value))
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
g2.parse(data=g1.serialize(format='turtle'), format='turtle')
self.assertTrue(g1.serialize(
format='turtle') == g2.serialize(format='turtle'))
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test serialisation of infinite values<commit_after>
|
from rdflib import Graph, Namespace, URIRef, Literal
from rdflib.compare import to_isomorphic
import unittest
class TestIssueXXXX(unittest.TestCase):
def test_issuexxxx(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
bob = URIRef("http://example.org/object/Bob")
value = Literal(float("inf"))
# g1 is a simple graph with one attribute having an infinite value
g1 = Graph()
g1.add((bob, PROV.value, value))
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
g2.parse(data=g1.serialize(format='turtle'), format='turtle')
self.assertTrue(g1.serialize(
format='turtle') == g2.serialize(format='turtle'))
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
if __name__ == "__main__":
unittest.main()
|
Test serialisation of infinite valuesfrom rdflib import Graph, Namespace, URIRef, Literal
from rdflib.compare import to_isomorphic
import unittest
class TestIssueXXXX(unittest.TestCase):
def test_issuexxxx(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
bob = URIRef("http://example.org/object/Bob")
value = Literal(float("inf"))
# g1 is a simple graph with one attribute having an infinite value
g1 = Graph()
g1.add((bob, PROV.value, value))
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
g2.parse(data=g1.serialize(format='turtle'), format='turtle')
self.assertTrue(g1.serialize(
format='turtle') == g2.serialize(format='turtle'))
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Test serialisation of infinite values<commit_after>from rdflib import Graph, Namespace, URIRef, Literal
from rdflib.compare import to_isomorphic
import unittest
class TestIssueXXXX(unittest.TestCase):
def test_issuexxxx(self):
PROV = Namespace('http://www.w3.org/ns/prov#')
bob = URIRef("http://example.org/object/Bob")
value = Literal(float("inf"))
# g1 is a simple graph with one attribute having an infinite value
g1 = Graph()
g1.add((bob, PROV.value, value))
# Build g2 out of the deserialisation of g1 serialisation
g2 = Graph()
g2.parse(data=g1.serialize(format='turtle'), format='turtle')
self.assertTrue(g1.serialize(
format='turtle') == g2.serialize(format='turtle'))
self.assertTrue(to_isomorphic(g1) == to_isomorphic(g2))
if __name__ == "__main__":
unittest.main()
|
|
31c3aa17875bf6e3c51d0a8b47500cd4f234f002
|
knights/k_tags.py
|
knights/k_tags.py
|
import ast
from .klass import build_method
from .library import Library
register = Library()
def parse_args(bits):
'''
Parse tag bits as if they're function args
'''
code = ast.parse('x(%s)' % bits, mode='eval')
return code.body.args, code.body.keywords
@register.tag(name='block')
def block(state, token):
token = token.strip()
func = build_method(state, token, endnode='endblock')
state['methods'].append(func)
return ast.YieldFrom(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr=token,
ctx=ast.Load()
),
args=[
ast.Name(id='context', ctx=ast.Load()),
],
keywords=[], starargs=None, kwargs=None
)
)
|
Add basic block tag implementation
|
Add basic block tag implementation
|
Python
|
mit
|
funkybob/knights-templater,funkybob/knights-templater
|
Add basic block tag implementation
|
import ast
from .klass import build_method
from .library import Library
register = Library()
def parse_args(bits):
'''
Parse tag bits as if they're function args
'''
code = ast.parse('x(%s)' % bits, mode='eval')
return code.body.args, code.body.keywords
@register.tag(name='block')
def block(state, token):
token = token.strip()
func = build_method(state, token, endnode='endblock')
state['methods'].append(func)
return ast.YieldFrom(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr=token,
ctx=ast.Load()
),
args=[
ast.Name(id='context', ctx=ast.Load()),
],
keywords=[], starargs=None, kwargs=None
)
)
|
<commit_before><commit_msg>Add basic block tag implementation<commit_after>
|
import ast
from .klass import build_method
from .library import Library
register = Library()
def parse_args(bits):
'''
Parse tag bits as if they're function args
'''
code = ast.parse('x(%s)' % bits, mode='eval')
return code.body.args, code.body.keywords
@register.tag(name='block')
def block(state, token):
token = token.strip()
func = build_method(state, token, endnode='endblock')
state['methods'].append(func)
return ast.YieldFrom(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr=token,
ctx=ast.Load()
),
args=[
ast.Name(id='context', ctx=ast.Load()),
],
keywords=[], starargs=None, kwargs=None
)
)
|
Add basic block tag implementation
import ast
from .klass import build_method
from .library import Library
register = Library()
def parse_args(bits):
'''
Parse tag bits as if they're function args
'''
code = ast.parse('x(%s)' % bits, mode='eval')
return code.body.args, code.body.keywords
@register.tag(name='block')
def block(state, token):
token = token.strip()
func = build_method(state, token, endnode='endblock')
state['methods'].append(func)
return ast.YieldFrom(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr=token,
ctx=ast.Load()
),
args=[
ast.Name(id='context', ctx=ast.Load()),
],
keywords=[], starargs=None, kwargs=None
)
)
|
<commit_before><commit_msg>Add basic block tag implementation<commit_after>
import ast
from .klass import build_method
from .library import Library
register = Library()
def parse_args(bits):
'''
Parse tag bits as if they're function args
'''
code = ast.parse('x(%s)' % bits, mode='eval')
return code.body.args, code.body.keywords
@register.tag(name='block')
def block(state, token):
token = token.strip()
func = build_method(state, token, endnode='endblock')
state['methods'].append(func)
return ast.YieldFrom(
value=ast.Call(
func=ast.Attribute(
value=ast.Name(id='self', ctx=ast.Load()),
attr=token,
ctx=ast.Load()
),
args=[
ast.Name(id='context', ctx=ast.Load()),
],
keywords=[], starargs=None, kwargs=None
)
)
|
|
1f1ffb3071f5ba3215b47745896352496df199fa
|
bin/cobbler/migrate_ks.py
|
bin/cobbler/migrate_ks.py
|
#!/usr/bin/python
import xmlrpclib
import logging
from compass.utils import setting_wrapper as setting
def main():
remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
systems = remote.get_systems(token)
for system in systems:
data = remote.generate_kickstart('', system['name'])
try:
with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
logging.info("Migrating kickstart for %s", system['name'])
f.write(data)
except:
logging.error("Directory /var/www/cblr_ks/ does not exist.")
if __name__ == '__main__':
logging.info("Running kickstart migration")
main()
|
Migrate rendered kickstarts to solve cobbler single-threading issues
|
Migrate rendered kickstarts to solve cobbler single-threading issues
Change-Id: I431878dd1fbab44c5415739db1c6bb89788a5803
|
Python
|
apache-2.0
|
stackforge/compass-core,stackforge/compass-core,stackforge/compass-core,openstack/compass-core,baigk/compass-core,openstack/compass-core,openstack/compass-core,baigk/compass-core,stackforge/compass-core,baigk/compass-core,openstack/compass-core
|
Migrate rendered kickstarts to solve cobbler single-threading issues
Change-Id: I431878dd1fbab44c5415739db1c6bb89788a5803
|
#!/usr/bin/python
import xmlrpclib
import logging
from compass.utils import setting_wrapper as setting
def main():
remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
systems = remote.get_systems(token)
for system in systems:
data = remote.generate_kickstart('', system['name'])
try:
with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
logging.info("Migrating kickstart for %s", system['name'])
f.write(data)
except:
logging.error("Directory /var/www/cblr_ks/ does not exist.")
if __name__ == '__main__':
logging.info("Running kickstart migration")
main()
|
<commit_before><commit_msg>Migrate rendered kickstarts to solve cobbler single-threading issues
Change-Id: I431878dd1fbab44c5415739db1c6bb89788a5803<commit_after>
|
#!/usr/bin/python
import xmlrpclib
import logging
from compass.utils import setting_wrapper as setting
def main():
remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
systems = remote.get_systems(token)
for system in systems:
data = remote.generate_kickstart('', system['name'])
try:
with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
logging.info("Migrating kickstart for %s", system['name'])
f.write(data)
except:
logging.error("Directory /var/www/cblr_ks/ does not exist.")
if __name__ == '__main__':
logging.info("Running kickstart migration")
main()
|
Migrate rendered kickstarts to solve cobbler single-threading issues
Change-Id: I431878dd1fbab44c5415739db1c6bb89788a5803#!/usr/bin/python
import xmlrpclib
import logging
from compass.utils import setting_wrapper as setting
def main():
remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
systems = remote.get_systems(token)
for system in systems:
data = remote.generate_kickstart('', system['name'])
try:
with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
logging.info("Migrating kickstart for %s", system['name'])
f.write(data)
except:
logging.error("Directory /var/www/cblr_ks/ does not exist.")
if __name__ == '__main__':
logging.info("Running kickstart migration")
main()
|
<commit_before><commit_msg>Migrate rendered kickstarts to solve cobbler single-threading issues
Change-Id: I431878dd1fbab44c5415739db1c6bb89788a5803<commit_after>#!/usr/bin/python
import xmlrpclib
import logging
from compass.utils import setting_wrapper as setting
def main():
remote = xmlrpclib.Server(setting.COBBLER_INSTALLER_URL, allow_none=True)
token = remote.login(*setting.COBBLER_INSTALLER_TOKEN)
systems = remote.get_systems(token)
for system in systems:
data = remote.generate_kickstart('', system['name'])
try:
with open('/var/www/cblr_ks/%s' % system['name'], 'w') as f:
logging.info("Migrating kickstart for %s", system['name'])
f.write(data)
except:
logging.error("Directory /var/www/cblr_ks/ does not exist.")
if __name__ == '__main__':
logging.info("Running kickstart migration")
main()
|
|
5d705221e6e6e1d32c90bd8a6e7ee940008d91e9
|
examples/chatserver/views.py
|
examples/chatserver/views.py
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://localhost:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://{SERVER_NAME}:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
Use META.SERVER_NAME in template view. …
|
Use META.SERVER_NAME in template view. …
|
Python
|
mit
|
yacneyac/django-websocket-redis,0nkery/django-websocket-redis3,yacneyac/django-websocket-redis,jgroszko/django-websocket-redis,0nkery/django-websocket-redis3,jgroszko/django-websocket-redis,malefice/django-websocket-redis,Frky/django-websocket-redis,Frky/django-websocket-redis,jrief/django-websocket-redis,malefice/django-websocket-redis,ojarva/django-websocket-redis,ojarva/django-websocket-redis,jrief/django-websocket-redis,Frky/django-websocket-redis
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://localhost:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
Use META.SERVER_NAME in template view. …
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://{SERVER_NAME}:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
<commit_before># -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://localhost:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
<commit_msg>Use META.SERVER_NAME in template view. …<commit_after>
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://{SERVER_NAME}:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://localhost:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
Use META.SERVER_NAME in template view. …# -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://{SERVER_NAME}:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
<commit_before># -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://localhost:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
<commit_msg>Use META.SERVER_NAME in template view. …<commit_after># -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.http import HttpResponse
from django.views.generic.base import TemplateView
from django.views.decorators.csrf import csrf_exempt
import redis
from ws4redis import settings as redis_settings
class BaseTemplateView(TemplateView):
def __init__(self):
self._connection = redis.StrictRedis(**redis_settings.WS4REDIS_CONNECTION)
def get_context_data(self, **kwargs):
context = super(BaseTemplateView, self).get_context_data(**kwargs)
context.update(ws_url='ws://{SERVER_NAME}:{SERVER_PORT}/ws/foobar'.format(**self.request.META))
return context
class BroadcastChatView(BaseTemplateView):
template_name = 'broadcast_chat.html'
def __init__(self):
super(BroadcastChatView, self).__init__()
self._connection.set('_broadcast_:foobar', 'Hello, Websockets')
class UserChatView(BaseTemplateView):
template_name = 'user_chat.html'
def get_context_data(self, **kwargs):
users = User.objects.all()
context = super(UserChatView, self).get_context_data(**kwargs)
context.update(users=users)
return context
@csrf_exempt
def post(self, request, *args, **kwargs):
channel = u'{0}:foobar'.format(request.POST.get('user'))
self._connection.publish(channel, request.POST.get('message'))
return HttpResponse('OK')
|
09cf3f450a6a3d718cf011daf6ef4862ad0181b4
|
examples/connected_region.py
|
examples/connected_region.py
|
import numpy as np
import matplotlib.pyplot as plt
import lulu.lulu_base as base
c = base.ConnectedRegion(shape=(5,5),
value=1, start_row=1,
rowptr=[0,4,6,10,14],
colptr=[2,3,4,5,0,5,0,1,2,5,0,2,3,5])
print c.todense()
dense = np.zeros((7,7,3))
dense[1:6, 1:6, 0] = c.todense()
plt.subplot(1, 3, 1)
plt.imshow(dense, interpolation='nearest')
plt.title('Connected region')
ii, jj = c.outside_boundary()
dense_outside = dense.copy()
for i, j in zip(ii, jj):
dense_outside[i + 1, j + 1] = [0, 1, 0]
plt.subplot(1, 3, 2)
plt.imshow(dense_outside, interpolation='nearest')
plt.title('Outside boundary')
ii, jj = c.inside_boundary()
dense_inside = dense.copy()
for i, j in zip(ii, jj):
dense_inside[i + 1, j + 1] = [0, 0, 1]
plt.subplot(1, 3, 3)
plt.imshow(dense_inside, interpolation='nearest')
plt.title('Inside boundary')
plt.show()
|
Add example to illustrate boundaries.
|
Add example to illustrate boundaries.
|
Python
|
bsd-3-clause
|
stefanv/lulu
|
Add example to illustrate boundaries.
|
import numpy as np
import matplotlib.pyplot as plt
import lulu.lulu_base as base
c = base.ConnectedRegion(shape=(5,5),
value=1, start_row=1,
rowptr=[0,4,6,10,14],
colptr=[2,3,4,5,0,5,0,1,2,5,0,2,3,5])
print c.todense()
dense = np.zeros((7,7,3))
dense[1:6, 1:6, 0] = c.todense()
plt.subplot(1, 3, 1)
plt.imshow(dense, interpolation='nearest')
plt.title('Connected region')
ii, jj = c.outside_boundary()
dense_outside = dense.copy()
for i, j in zip(ii, jj):
dense_outside[i + 1, j + 1] = [0, 1, 0]
plt.subplot(1, 3, 2)
plt.imshow(dense_outside, interpolation='nearest')
plt.title('Outside boundary')
ii, jj = c.inside_boundary()
dense_inside = dense.copy()
for i, j in zip(ii, jj):
dense_inside[i + 1, j + 1] = [0, 0, 1]
plt.subplot(1, 3, 3)
plt.imshow(dense_inside, interpolation='nearest')
plt.title('Inside boundary')
plt.show()
|
<commit_before><commit_msg>Add example to illustrate boundaries.<commit_after>
|
import numpy as np
import matplotlib.pyplot as plt
import lulu.lulu_base as base
c = base.ConnectedRegion(shape=(5,5),
value=1, start_row=1,
rowptr=[0,4,6,10,14],
colptr=[2,3,4,5,0,5,0,1,2,5,0,2,3,5])
print c.todense()
dense = np.zeros((7,7,3))
dense[1:6, 1:6, 0] = c.todense()
plt.subplot(1, 3, 1)
plt.imshow(dense, interpolation='nearest')
plt.title('Connected region')
ii, jj = c.outside_boundary()
dense_outside = dense.copy()
for i, j in zip(ii, jj):
dense_outside[i + 1, j + 1] = [0, 1, 0]
plt.subplot(1, 3, 2)
plt.imshow(dense_outside, interpolation='nearest')
plt.title('Outside boundary')
ii, jj = c.inside_boundary()
dense_inside = dense.copy()
for i, j in zip(ii, jj):
dense_inside[i + 1, j + 1] = [0, 0, 1]
plt.subplot(1, 3, 3)
plt.imshow(dense_inside, interpolation='nearest')
plt.title('Inside boundary')
plt.show()
|
Add example to illustrate boundaries.import numpy as np
import matplotlib.pyplot as plt
import lulu.lulu_base as base
c = base.ConnectedRegion(shape=(5,5),
value=1, start_row=1,
rowptr=[0,4,6,10,14],
colptr=[2,3,4,5,0,5,0,1,2,5,0,2,3,5])
print c.todense()
dense = np.zeros((7,7,3))
dense[1:6, 1:6, 0] = c.todense()
plt.subplot(1, 3, 1)
plt.imshow(dense, interpolation='nearest')
plt.title('Connected region')
ii, jj = c.outside_boundary()
dense_outside = dense.copy()
for i, j in zip(ii, jj):
dense_outside[i + 1, j + 1] = [0, 1, 0]
plt.subplot(1, 3, 2)
plt.imshow(dense_outside, interpolation='nearest')
plt.title('Outside boundary')
ii, jj = c.inside_boundary()
dense_inside = dense.copy()
for i, j in zip(ii, jj):
dense_inside[i + 1, j + 1] = [0, 0, 1]
plt.subplot(1, 3, 3)
plt.imshow(dense_inside, interpolation='nearest')
plt.title('Inside boundary')
plt.show()
|
<commit_before><commit_msg>Add example to illustrate boundaries.<commit_after>import numpy as np
import matplotlib.pyplot as plt
import lulu.lulu_base as base
c = base.ConnectedRegion(shape=(5,5),
value=1, start_row=1,
rowptr=[0,4,6,10,14],
colptr=[2,3,4,5,0,5,0,1,2,5,0,2,3,5])
print c.todense()
dense = np.zeros((7,7,3))
dense[1:6, 1:6, 0] = c.todense()
plt.subplot(1, 3, 1)
plt.imshow(dense, interpolation='nearest')
plt.title('Connected region')
ii, jj = c.outside_boundary()
dense_outside = dense.copy()
for i, j in zip(ii, jj):
dense_outside[i + 1, j + 1] = [0, 1, 0]
plt.subplot(1, 3, 2)
plt.imshow(dense_outside, interpolation='nearest')
plt.title('Outside boundary')
ii, jj = c.inside_boundary()
dense_inside = dense.copy()
for i, j in zip(ii, jj):
dense_inside[i + 1, j + 1] = [0, 0, 1]
plt.subplot(1, 3, 3)
plt.imshow(dense_inside, interpolation='nearest')
plt.title('Inside boundary')
plt.show()
|
|
e4b1cccddde441a4973eb3c9f741e2ce47e1d8dc
|
openprescribing/frontend/tests/test_geojson_serializer.py
|
openprescribing/frontend/tests/test_geojson_serializer.py
|
import json
from django.test import TestCase
from django.core.serializers import serialize
from frontend.models import PCT
from api.geojson_serializer import as_geojson_stream
class GeoJSONSerializerTest(TestCase):
fixtures = ['orgs']
def test_output_is_the_same_as_core_serializer(self):
fields = ['name', 'org_type', 'ons_code', 'boundary']
geo_field = 'boundary'
queryset = PCT.objects.all()
expected_json = serialize(
'geojson',
queryset,
geometry_field=geo_field,
fields=fields
)
stream = as_geojson_stream(
queryset.values(*fields),
geometry_field=geo_field
)
expected = json.loads(expected_json)
actual = json.loads(''.join(stream))
self.assertEqual(expected, actual)
|
Add test for GeoJSON serializer
|
Add test for GeoJSON serializer
|
Python
|
mit
|
ebmdatalab/openprescribing,ebmdatalab/openprescribing,annapowellsmith/openpresc,ebmdatalab/openprescribing,annapowellsmith/openpresc,annapowellsmith/openpresc,annapowellsmith/openpresc,ebmdatalab/openprescribing
|
Add test for GeoJSON serializer
|
import json
from django.test import TestCase
from django.core.serializers import serialize
from frontend.models import PCT
from api.geojson_serializer import as_geojson_stream
class GeoJSONSerializerTest(TestCase):
fixtures = ['orgs']
def test_output_is_the_same_as_core_serializer(self):
fields = ['name', 'org_type', 'ons_code', 'boundary']
geo_field = 'boundary'
queryset = PCT.objects.all()
expected_json = serialize(
'geojson',
queryset,
geometry_field=geo_field,
fields=fields
)
stream = as_geojson_stream(
queryset.values(*fields),
geometry_field=geo_field
)
expected = json.loads(expected_json)
actual = json.loads(''.join(stream))
self.assertEqual(expected, actual)
|
<commit_before><commit_msg>Add test for GeoJSON serializer<commit_after>
|
import json
from django.test import TestCase
from django.core.serializers import serialize
from frontend.models import PCT
from api.geojson_serializer import as_geojson_stream
class GeoJSONSerializerTest(TestCase):
fixtures = ['orgs']
def test_output_is_the_same_as_core_serializer(self):
fields = ['name', 'org_type', 'ons_code', 'boundary']
geo_field = 'boundary'
queryset = PCT.objects.all()
expected_json = serialize(
'geojson',
queryset,
geometry_field=geo_field,
fields=fields
)
stream = as_geojson_stream(
queryset.values(*fields),
geometry_field=geo_field
)
expected = json.loads(expected_json)
actual = json.loads(''.join(stream))
self.assertEqual(expected, actual)
|
Add test for GeoJSON serializerimport json
from django.test import TestCase
from django.core.serializers import serialize
from frontend.models import PCT
from api.geojson_serializer import as_geojson_stream
class GeoJSONSerializerTest(TestCase):
fixtures = ['orgs']
def test_output_is_the_same_as_core_serializer(self):
fields = ['name', 'org_type', 'ons_code', 'boundary']
geo_field = 'boundary'
queryset = PCT.objects.all()
expected_json = serialize(
'geojson',
queryset,
geometry_field=geo_field,
fields=fields
)
stream = as_geojson_stream(
queryset.values(*fields),
geometry_field=geo_field
)
expected = json.loads(expected_json)
actual = json.loads(''.join(stream))
self.assertEqual(expected, actual)
|
<commit_before><commit_msg>Add test for GeoJSON serializer<commit_after>import json
from django.test import TestCase
from django.core.serializers import serialize
from frontend.models import PCT
from api.geojson_serializer import as_geojson_stream
class GeoJSONSerializerTest(TestCase):
fixtures = ['orgs']
def test_output_is_the_same_as_core_serializer(self):
fields = ['name', 'org_type', 'ons_code', 'boundary']
geo_field = 'boundary'
queryset = PCT.objects.all()
expected_json = serialize(
'geojson',
queryset,
geometry_field=geo_field,
fields=fields
)
stream = as_geojson_stream(
queryset.values(*fields),
geometry_field=geo_field
)
expected = json.loads(expected_json)
actual = json.loads(''.join(stream))
self.assertEqual(expected, actual)
|
|
8f9ee7f7ad751a3d785e1063c7bbf52b4c8a8de2
|
migrations/versions/0236_another_letter_org.py
|
migrations/versions/0236_another_letter_org.py
|
"""empty message
Revision ID: 0236_another_letter_org
Revises: 0235_add_postage_to_pk
"""
# revision identifiers, used by Alembic.
revision = '0236_another_letter_org'
down_revision = '0235_add_postage_to_pk'
from alembic import op
NEW_ORGANISATIONS = [
('514', 'Brighton and Hove city council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter org for Brighton and Hove
|
Add letter org for Brighton and Hove
|
Python
|
mit
|
alphagov/notifications-api,alphagov/notifications-api
|
Add letter org for Brighton and Hove
|
"""empty message
Revision ID: 0236_another_letter_org
Revises: 0235_add_postage_to_pk
"""
# revision identifiers, used by Alembic.
revision = '0236_another_letter_org'
down_revision = '0235_add_postage_to_pk'
from alembic import op
NEW_ORGANISATIONS = [
('514', 'Brighton and Hove city council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter org for Brighton and Hove<commit_after>
|
"""empty message
Revision ID: 0236_another_letter_org
Revises: 0235_add_postage_to_pk
"""
# revision identifiers, used by Alembic.
revision = '0236_another_letter_org'
down_revision = '0235_add_postage_to_pk'
from alembic import op
NEW_ORGANISATIONS = [
('514', 'Brighton and Hove city council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
Add letter org for Brighton and Hove"""empty message
Revision ID: 0236_another_letter_org
Revises: 0235_add_postage_to_pk
"""
# revision identifiers, used by Alembic.
revision = '0236_another_letter_org'
down_revision = '0235_add_postage_to_pk'
from alembic import op
NEW_ORGANISATIONS = [
('514', 'Brighton and Hove city council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
<commit_before><commit_msg>Add letter org for Brighton and Hove<commit_after>"""empty message
Revision ID: 0236_another_letter_org
Revises: 0235_add_postage_to_pk
"""
# revision identifiers, used by Alembic.
revision = '0236_another_letter_org'
down_revision = '0235_add_postage_to_pk'
from alembic import op
NEW_ORGANISATIONS = [
('514', 'Brighton and Hove city council'),
]
def upgrade():
for numeric_id, name in NEW_ORGANISATIONS:
op.execute("""
INSERT
INTO dvla_organisation
VALUES ('{}', '{}')
""".format(numeric_id, name))
def downgrade():
for numeric_id, _ in NEW_ORGANISATIONS:
op.execute("""
DELETE
FROM dvla_organisation
WHERE id = '{}'
""".format(numeric_id))
|
|
449285d8136015fe8c0ecd1c756d77b21dfba6e9
|
html/cli-get-html-hyperlinks.py
|
html/cli-get-html-hyperlinks.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from HTMLParser import HTMLParser
# HTML Hyperlink(s) getter
class GetHyperlinks(HTMLParser):
def __init__(self):
self.reset()
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.links.append(attr[1])
def get_links(self):
return self.links
# open the url/page
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# get HTML's hyperlinks
gh = GetHyperlinks()
gh.feed(html)
# iterate through result and print 'em out
links = gh.get_links()
for link in links:
print(link)
|
Add HTML hyperlink (<a> tag) finder/getter
|
Add HTML hyperlink (<a> tag) finder/getter
|
Python
|
mit
|
rawswift/python-collections
|
Add HTML hyperlink (<a> tag) finder/getter
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from HTMLParser import HTMLParser
# HTML Hyperlink(s) getter
class GetHyperlinks(HTMLParser):
def __init__(self):
self.reset()
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.links.append(attr[1])
def get_links(self):
return self.links
# open the url/page
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# get HTML's hyperlinks
gh = GetHyperlinks()
gh.feed(html)
# iterate through result and print 'em out
links = gh.get_links()
for link in links:
print(link)
|
<commit_before><commit_msg>Add HTML hyperlink (<a> tag) finder/getter<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from HTMLParser import HTMLParser
# HTML Hyperlink(s) getter
class GetHyperlinks(HTMLParser):
def __init__(self):
self.reset()
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.links.append(attr[1])
def get_links(self):
return self.links
# open the url/page
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# get HTML's hyperlinks
gh = GetHyperlinks()
gh.feed(html)
# iterate through result and print 'em out
links = gh.get_links()
for link in links:
print(link)
|
Add HTML hyperlink (<a> tag) finder/getter#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from HTMLParser import HTMLParser
# HTML Hyperlink(s) getter
class GetHyperlinks(HTMLParser):
def __init__(self):
self.reset()
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.links.append(attr[1])
def get_links(self):
return self.links
# open the url/page
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# get HTML's hyperlinks
gh = GetHyperlinks()
gh.feed(html)
# iterate through result and print 'em out
links = gh.get_links()
for link in links:
print(link)
|
<commit_before><commit_msg>Add HTML hyperlink (<a> tag) finder/getter<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
import urllib2
from HTMLParser import HTMLParser
# HTML Hyperlink(s) getter
class GetHyperlinks(HTMLParser):
def __init__(self):
self.reset()
self.links = []
def handle_starttag(self, tag, attrs):
if tag == 'a':
for attr in attrs:
if attr[0] == 'href':
self.links.append(attr[1])
def get_links(self):
return self.links
# open the url/page
response = urllib2.urlopen("https://www.python.org/")
html = response.read()
# get HTML's hyperlinks
gh = GetHyperlinks()
gh.feed(html)
# iterate through result and print 'em out
links = gh.get_links()
for link in links:
print(link)
|
|
923c463e4a8e2da3b2cbe60d7351a3036257551d
|
migrations/versions/65fc9ede4746_add_is_draft_status_to_queries_and_.py
|
migrations/versions/65fc9ede4746_add_is_draft_status_to_queries_and_.py
|
"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.get_bind()
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "*** Skipping creationg of is_draft columns as they already exist."
op.execute("ROLLBACK")
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
|
Add migration for the is_draft column
|
Add migration for the is_draft column
|
Python
|
bsd-2-clause
|
stefanseifert/redash,amino-data/redash,getredash/redash,alexanderlz/redash,alexanderlz/redash,vishesh92/redash,imsally/redash,amino-data/redash,chriszs/redash,useabode/redash,44px/redash,EverlyWell/redash,imsally/redash,hudl/redash,amino-data/redash,rockwotj/redash,rockwotj/redash,M32Media/redash,hudl/redash,alexanderlz/redash,imsally/redash,hudl/redash,44px/redash,getredash/redash,alexanderlz/redash,44px/redash,moritz9/redash,M32Media/redash,denisov-vlad/redash,hudl/redash,denisov-vlad/redash,useabode/redash,moritz9/redash,getredash/redash,useabode/redash,rockwotj/redash,crowdworks/redash,vishesh92/redash,stefanseifert/redash,M32Media/redash,EverlyWell/redash,rockwotj/redash,moritz9/redash,stefanseifert/redash,amino-data/redash,imsally/redash,getredash/redash,vishesh92/redash,chriszs/redash,M32Media/redash,44px/redash,moritz9/redash,getredash/redash,stefanseifert/redash,denisov-vlad/redash,chriszs/redash,EverlyWell/redash,vishesh92/redash,useabode/redash,denisov-vlad/redash,crowdworks/redash,stefanseifert/redash,crowdworks/redash,EverlyWell/redash,crowdworks/redash,denisov-vlad/redash,chriszs/redash
|
Add migration for the is_draft column
|
"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.get_bind()
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "*** Skipping creationg of is_draft columns as they already exist."
op.execute("ROLLBACK")
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
|
<commit_before><commit_msg>Add migration for the is_draft column<commit_after>
|
"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.get_bind()
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "*** Skipping creationg of is_draft columns as they already exist."
op.execute("ROLLBACK")
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
|
Add migration for the is_draft column"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.get_bind()
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "*** Skipping creationg of is_draft columns as they already exist."
op.execute("ROLLBACK")
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
|
<commit_before><commit_msg>Add migration for the is_draft column<commit_after>"""Add is_draft status to queries and dashboards
Revision ID: 65fc9ede4746
Revises:
Create Date: 2016-12-07 18:08:13.395586
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
from sqlalchemy.exc import ProgrammingError
revision = '65fc9ede4746'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
op.get_bind()
try:
op.add_column('queries', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.add_column('dashboards', sa.Column('is_draft', sa.Boolean, default=True, index=True))
op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
op.execute("UPDATE dashboards SET is_draft = false")
except ProgrammingError as e:
# The columns might exist if you ran the old migrations.
if 'column "is_draft" of relation "queries" already exists' in e.message:
print "*** Skipping creationg of is_draft columns as they already exist."
op.execute("ROLLBACK")
def downgrade():
op.drop_column('queries', 'is_draft')
op.drop_column('dashboards', 'is_draft')
|
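Note on the guard used above: the migration detects a pre-existing column by catching ProgrammingError and reading e.message, an attribute that exists only on Python 2 exceptions. A minimal sketch of an alternative guard, assuming SQLAlchemy's runtime inspector can be pointed at the Alembic connection (the helper name below is illustrative and not part of the original commit):
from alembic import op
import sqlalchemy as sa
from sqlalchemy import inspect
def _has_column(table_name, column_name):
    # Check the live schema instead of parsing the database error message.
    inspector = inspect(op.get_bind())
    return any(col["name"] == column_name for col in inspector.get_columns(table_name))
def upgrade():
    if not _has_column("queries", "is_draft"):
        op.add_column("queries", sa.Column("is_draft", sa.Boolean, default=True, index=True))
        op.execute("UPDATE queries SET is_draft = (name = 'New Query')")
    if not _has_column("dashboards", "is_draft"):
        op.add_column("dashboards", sa.Column("is_draft", sa.Boolean, default=True, index=True))
        op.execute("UPDATE dashboards SET is_draft = false")
The explicit ROLLBACK in the original remains relevant on PostgreSQL, where a failed statement aborts the surrounding transaction until it is rolled back.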
|
617819e7aff47f2764d13bd183080dea54f689f9
|
test_equil_solver.py
|
test_equil_solver.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:44:38 2015
@author: jensv
"""
import numpy as np
import numpy.testing as test
import scipy.integrate as integrate
import equil_solver as es
from scipy.interpolate import splev
from nose.tools import with_setup
test_equil = None
def setup_func():
r"""
Generate test equilibrium.
"""
global test_equil
test_equil = es.UnitlessSmoothedCoreSkin(core_radius_norm=0.9,
transition_width_norm=0.033,
skin_width_norm=0.034,
epsilon=0.9, lambda_bar=.5)
def teardown_func():
pass
@with_setup(setup_func, teardown_func)
def test_epsilon():
r"""
Test that ratio of b_theta gives epsilon.
"""
r_core = test_equil.core_radius
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
epsilon_from_b_theta_ratio = (splev(r_core, b_theta_tck) /
splev(a, b_theta_tck))
test.assert_approx_equal(epsilon_from_b_theta_ratio, test_equil.epsilon,
significant=3)
@with_setup(setup_func, teardown_func)
def test_lambda_bar():
r"""
Test that lambda bar is given by ratio of total current to magnetic flux.
"""
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
b_z_tck = test_equil.get_tck_splines()['b_z']
calculated_lambda = (2.*splev(a, b_theta_tck) /
(splev(a, b_z_tck)))
test.assert_approx_equal(calculated_lambda, test_equil.lambda_bar,
significant=3)
|
Add tests for equilibrium profiles.
|
Add tests for equilibrium profiles.
|
Python
|
mit
|
jensv/fluxtubestability,jensv/fluxtubestability
|
Add tests for equilibrium profiles.
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:44:38 2015
@author: jensv
"""
import numpy as np
import numpy.testing as test
import scipy.integrate as integrate
import equil_solver as es
from scipy.interpolate import splev
from nose.tools import with_setup
test_equil = None
def setup_func():
r"""
Generate test equilibrium.
"""
global test_equil
test_equil = es.UnitlessSmoothedCoreSkin(core_radius_norm=0.9,
transition_width_norm=0.033,
skin_width_norm=0.034,
epsilon=0.9, lambda_bar=.5)
def teardown_func():
pass
@with_setup(setup_func, teardown_func)
def test_epsilon():
r"""
Test that ratio of b_theta gives epsilon.
"""
r_core = test_equil.core_radius
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
epsilon_from_b_theta_ratio = (splev(r_core, b_theta_tck) /
splev(a, b_theta_tck))
test.assert_approx_equal(epsilon_from_b_theta_ratio, test_equil.epsilon,
significant=3)
@with_setup(setup_func, teardown_func)
def test_lambda_bar():
r"""
Test that lambda bar is given by ratio of total current to magnetic flux.
"""
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
b_z_tck = test_equil.get_tck_splines()['b_z']
calculated_lambda = (2.*splev(a, b_theta_tck) /
(splev(a, b_z_tck)))
test.assert_approx_equal(calculated_lambda, test_equil.lambda_bar,
significant=3)
|
<commit_before><commit_msg>Add tests for equilibrium profiles.<commit_after>
|
# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:44:38 2015
@author: jensv
"""
import numpy as np
import numpy.testing as test
import scipy.integrate as integrate
import equil_solver as es
from scipy.interpolate import splev
from nose.tools import with_setup
test_equil = None
def setup_func():
r"""
Generate test equilibrium.
"""
global test_equil
test_equil = es.UnitlessSmoothedCoreSkin(core_radius_norm=0.9,
transition_width_norm=0.033,
skin_width_norm=0.034,
epsilon=0.9, lambda_bar=.5)
def teardown_func():
pass
@with_setup(setup_func, teardown_func)
def test_epsilon():
r"""
Test that ratio of b_theta gives epsilon.
"""
r_core = test_equil.core_radius
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
epsilon_from_b_theta_ratio = (splev(r_core, b_theta_tck) /
splev(a, b_theta_tck))
test.assert_approx_equal(epsilon_from_b_theta_ratio, test_equil.epsilon,
significant=3)
@with_setup(setup_func, teardown_func)
def test_lambda_bar():
r"""
Test that lambda bar is given by ratio of total current to magnetic flux.
"""
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
b_z_tck = test_equil.get_tck_splines()['b_z']
calculated_lambda = (2.*splev(a, b_theta_tck) /
(splev(a, b_z_tck)))
test.assert_approx_equal(calculated_lambda, test_equil.lambda_bar,
significant=3)
|
Add tests for equilibrium profiles.# -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:44:38 2015
@author: jensv
"""
import numpy as np
import numpy.testing as test
import scipy.integrate as integrate
import equil_solver as es
from scipy.interpolate import splev
from nose.tools import with_setup
test_equil = None
def setup_func():
r"""
Generate test equilibrium.
"""
global test_equil
test_equil = es.UnitlessSmoothedCoreSkin(core_radius_norm=0.9,
transition_width_norm=0.033,
skin_width_norm=0.034,
epsilon=0.9, lambda_bar=.5)
def teardown_func():
pass
@with_setup(setup_func, teardown_func)
def test_epsilon():
r"""
Test that ratio of b_theta gives epsilon.
"""
r_core = test_equil.core_radius
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
epsilon_from_b_theta_ratio = (splev(r_core, b_theta_tck) /
splev(a, b_theta_tck))
test.assert_approx_equal(epsilon_from_b_theta_ratio, test_equil.epsilon,
significant=3)
@with_setup(setup_func, teardown_func)
def test_lambda_bar():
r"""
Test that lambda bar is given by ratio of total current to magnetic flux.
"""
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
b_z_tck = test_equil.get_tck_splines()['b_z']
calculated_lambda = (2.*splev(a, b_theta_tck) /
(splev(a, b_z_tck)))
test.assert_approx_equal(calculated_lambda, test_equil.lambda_bar,
significant=3)
|
<commit_before><commit_msg>Add tests for equilibrium profiles.<commit_after># -*- coding: utf-8 -*-
"""
Created on Thu Nov 5 09:44:38 2015
@author: jensv
"""
import numpy as np
import numpy.testing as test
import scipy.integrate as integrate
import equil_solver as es
from scipy.interpolate import splev
from nose.tools import with_setup
test_equil = None
def setup_func():
r"""
Generate test equilibrium.
"""
global test_equil
test_equil = es.UnitlessSmoothedCoreSkin(core_radius_norm=0.9,
transition_width_norm=0.033,
skin_width_norm=0.034,
epsilon=0.9, lambda_bar=.5)
def teardown_func():
pass
@with_setup(setup_func, teardown_func)
def test_epsilon():
r"""
Test that ratio of b_theta gives epsilon.
"""
r_core = test_equil.core_radius
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
epsilon_from_b_theta_ratio = (splev(r_core, b_theta_tck) /
splev(a, b_theta_tck))
test.assert_approx_equal(epsilon_from_b_theta_ratio, test_equil.epsilon,
significant=3)
@with_setup(setup_func, teardown_func)
def test_lambda_bar():
r"""
Test that lambda bar is given by ratio of total current to magnetic flux.
"""
a = (test_equil.core_radius + 2.*test_equil.transition_width +
test_equil.skin_width)
b_theta_tck = test_equil.get_tck_splines()['b_theta']
b_z_tck = test_equil.get_tck_splines()['b_z']
calculated_lambda = (2.*splev(a, b_theta_tck) /
(splev(a, b_z_tck)))
test.assert_approx_equal(calculated_lambda, test_equil.lambda_bar,
significant=3)
|
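The two assertions above amount to simple relations between the spline-evaluated fields at the core radius and at the outer radius a = core_radius + 2*transition_width + skin_width. Written out, with notation taken from the test code rather than from any library documentation:
\epsilon = \frac{B_\theta(r_\mathrm{core})}{B_\theta(a)},
\qquad
\bar{\lambda} = \frac{2\,B_\theta(a)}{B_z(a)}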
|
04e63aa44bb2de8f769202c02a3a23a7e9d17d74
|
tests/unit/test_handlers.py
|
tests/unit/test_handlers.py
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.
|
Add test capturing bad implementation of contains handler.
|
Python
|
mit
|
yougov/pmxbot,yougov/pmxbot,yougov/pmxbot
|
Add test capturing bad implementation of contains handler.
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>
|
from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
Add test capturing bad implementation of contains handler.from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
<commit_before><commit_msg>Add test capturing bad implementation of contains handler.<commit_after>from pmxbot import core
def test_contains_always_match():
"""
Contains handler should always match if no rate is specified.
"""
handler = core.ContainsHandler(name='#', func=None)
assert handler.match('Tell me about #foo', channel='bar')
|
|
14d048091cfa24d0f6abd5cb61d814c4fee9e6db
|
hooks/post_gen_project.py
|
hooks/post_gen_project.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rename the generated kv file to be compatible with the original kivy kv file
detection of `App.load_kv`.
"""
import os
package_dir = '{{cookiecutter.repo_name}}'
old_kv_file = os.path.join(package_dir, '{{cookiecutter.app_class_name}}.kv')
lower_app_class_name = '{{cookiecutter.app_class_name}}'.lower()
if (lower_app_class_name.endswith('app')):
lower_app_class_name = lower_app_class_name[:-3]
new_kv_file = os.path.join(package_dir, '{}.kv'.format(lower_app_class_name))
os.rename(old_kv_file, new_kv_file)
|
Implement a post gen python script to rename the kv file
|
Implement a post gen python script to rename the kv file
|
Python
|
mit
|
hackebrot/cookiedozer,hackebrot/cookiedozer
|
Implement a post gen python script to rename the kv file
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rename the generated kv file to be compatible with the original kivy kv file
detection of `App.load_kv`.
"""
import os
package_dir = '{{cookiecutter.repo_name}}'
old_kv_file = os.path.join(package_dir, '{{cookiecutter.app_class_name}}.kv')
lower_app_class_name = '{{cookiecutter.app_class_name}}'.lower()
if (lower_app_class_name.endswith('app')):
lower_app_class_name = lower_app_class_name[:-3]
new_kv_file = os.path.join(package_dir, '{}.kv'.format(lower_app_class_name))
os.rename(old_kv_file, new_kv_file)
|
<commit_before><commit_msg>Implement a post gen python script to rename the kv file<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rename the generated kv file to be compatible with the original kivy kv file
detection of `App.load_kv`.
"""
import os
package_dir = '{{cookiecutter.repo_name}}'
old_kv_file = os.path.join(package_dir, '{{cookiecutter.app_class_name}}.kv')
lower_app_class_name = '{{cookiecutter.app_class_name}}'.lower()
if (lower_app_class_name.endswith('app')):
lower_app_class_name = lower_app_class_name[:-3]
new_kv_file = os.path.join(package_dir, '{}.kv'.format(lower_app_class_name))
os.rename(old_kv_file, new_kv_file)
|
Implement a post gen python script to rename the kv file#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rename the generated kv file to be compatible with the original kivy kv file
detection of `App.load_kv`.
"""
import os
package_dir = '{{cookiecutter.repo_name}}'
old_kv_file = os.path.join(package_dir, '{{cookiecutter.app_class_name}}.kv')
lower_app_class_name = '{{cookiecutter.app_class_name}}'.lower()
if (lower_app_class_name.endswith('app')):
lower_app_class_name = lower_app_class_name[:-3]
new_kv_file = os.path.join(package_dir, '{}.kv'.format(lower_app_class_name))
os.rename(old_kv_file, new_kv_file)
|
<commit_before><commit_msg>Implement a post gen python script to rename the kv file<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Rename the generated kv file to be compatible with the original kivy kv file
detection of `App.load_kv`.
"""
import os
package_dir = '{{cookiecutter.repo_name}}'
old_kv_file = os.path.join(package_dir, '{{cookiecutter.app_class_name}}.kv')
lower_app_class_name = '{{cookiecutter.app_class_name}}'.lower()
if (lower_app_class_name.endswith('app')):
lower_app_class_name = lower_app_class_name[:-3]
new_kv_file = os.path.join(package_dir, '{}.kv'.format(lower_app_class_name))
os.rename(old_kv_file, new_kv_file)
|
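The hook above mirrors Kivy's default kv lookup: App.load_kv derives the file name from the App subclass name, lowercased, with a trailing "app" stripped. A small illustration of the same mapping (the class name is a made-up example, not taken from the template):
def expected_kv_name(app_class_name):
    # Same rule as the hook: lowercase the class name, then strip a trailing "app".
    name = app_class_name.lower()
    if name.endswith('app'):
        name = name[:-3]
    return '{}.kv'.format(name)
# expected_kv_name('WeatherApp') == 'weather.kv', the file App.load_kv would look for.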
|
92e840aef7ac0d9aee629db58791a43a71cd578c
|
myhdl/test/conversion/numeric/test_numass.py
|
myhdl/test/conversion/numeric/test_numass.py
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
|
Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.
|
Python
|
lgpl-2.1
|
jmgc/myhdl-numeric,jmgc/myhdl-numeric,jmgc/myhdl-numeric
|
Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
<commit_before><commit_msg>Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.<commit_after>
|
from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
<commit_before><commit_msg>Revert "Revert "Revert "Revert "Added the number assignment test for numeric.""""
This reverts commit 91151bc6fd2c48c83656452e7c8f8f7e8b7b4218.<commit_after>from __future__ import absolute_import, print_function
from random import randrange
from myhdl import Signal, uintba, sintba, instance, delay, conversion
def NumassBench():
p = Signal(uintba(1, 8))
q = Signal(uintba(1, 40))
r = Signal(sintba(1, 9))
s = Signal(sintba(1, 41))
PBIGINT = randrange(2**34, 2**40)
NBIGINT = -randrange(2**34, 2**40)
@instance
def check():
p.next = 0
q.next = 0
r.next = 0
s.next = 0
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 1
q.next = 1
r.next = 1
s.next = 1
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 2
q.next = 2
r.next = -2
s.next = -2
yield delay(10)
print("%d %d %d %d" % (p, q, r, s))
p.next = 255
q.next = 246836311517
r.next = 255
s.next = -246836311517
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
p.next = 254
q.next = PBIGINT
r.next = -256
s.next = NBIGINT
yield delay(10)
print("%d %d %d %d %d %d" % (p, q[40:20], q[20:0], r,
s[41:20], s[20:0]))
return check
def test_numass():
assert conversion.verify(NumassBench) == 0
|
|
de760c40c8aa187131b0b3b7a9517445cb648634
|
distance.py
|
distance.py
|
#Import needed libraries
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
i=0
ret=True
while(i<1):
ret, frame =cap.read()
# print("Camera is still running buddy")
cv2.imwrite('test{0}.png'.format(i), frame)
i=i+1
cap.release()
cv2.destroyAllWindows()
|
Add python script to take image frame from live camera feed
|
Add python script to take image frame from live camera feed
|
Python
|
mit
|
galxy25/safeDriver
|
Add python script to take image frame from live camera feed
|
#Import needed libraries
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
i=0
ret=True
while(i<1):
ret, frame =cap.read()
# print("Camera is still running buddy")
cv2.imwrite('test{0}.png'.format(i), frame)
i=i+1
cap.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add python script to take image frame from live camera feed<commit_after>
|
#Import needed libraries
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
i=0
ret=True
while(i<1):
ret, frame =cap.read()
# print("Camera is still running buddy")
cv2.imwrite('test{0}.png'.format(i), frame)
i=i+1
cap.release()
cv2.destroyAllWindows()
|
Add python script to take image frame from live camera feed#Import needed libraries
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
i=0
ret=True
while(i<1):
ret, frame =cap.read()
# print("Camera is still running buddy")
cv2.imwrite('test{0}.png'.format(i), frame)
i=i+1
cap.release()
cv2.destroyAllWindows()
|
<commit_before><commit_msg>Add python script to take image frame from live camera feed<commit_after>#Import needed libraries
import numpy as np
import cv2
cap=cv2.VideoCapture(0)
i=0
ret=True
while(i<1):
ret, frame =cap.read()
# print("Camera is still running buddy")
cv2.imwrite('test{0}.png'.format(i), frame)
i=i+1
cap.release()
cv2.destroyAllWindows()
|
|
08749433dfcc8e3df19adcdfc4598f9cba43adee
|
ansible-tests/validations.py
|
ansible-tests/validations.py
|
#!/usr/bin/env python
import sys
import glob
def die(msg):
print msg
sys.exit(1)
def validations():
validations = glob.glob('playbooks/*.yaml')
return list(sorted(validations))
def command_list(**args):
for i, name in enumerate(validations()):
print "%d. %s" % (i + 1, name)
def command_run(*args):
if len(args) != 1:
die("You must pass one argument: the validation ID.")
try:
index = int(args[0]) - 1
except ValueError:
die("Validation ID must be a number.")
if index < 0:
die("Validation ID must be a positive number.")
try:
validation_path = validations()[index]
except IndexError:
die("Invalid validation ID.")
sys.exit(1)
print "Running validation '%s'" % validation_path
def unknown_command(*args):
die("Unknown command")
if __name__ == '__main__':
if len(sys.argv) <= 1:
die("You must enter a command")
command = sys.argv[1]
command_fn = globals().get('command_%s' % command, unknown_command)
command_fn(*sys.argv[2:])
|
Add a basic runner program
|
Add a basic runner program
It just lists validations right now. Ultimately it will run them and
potentially provide a HTTP API.
|
Python
|
apache-2.0
|
rthallisey/clapper,coolsvap/clapper,coolsvap/clapper,coolsvap/clapper,rthallisey/clapper
|
Add a basic runner program
It just lists validations right now. Ultimately it will run them and
potentially provide a HTTP API.
|
#!/usr/bin/env python
import sys
import glob
def die(msg):
print msg
sys.exit(1)
def validations():
validations = glob.glob('playbooks/*.yaml')
return list(sorted(validations))
def command_list(**args):
for i, name in enumerate(validations()):
print "%d. %s" % (i + 1, name)
def command_run(*args):
if len(args) != 1:
die("You must pass one argument: the validation ID.")
try:
index = int(args[0]) - 1
except ValueError:
die("Validation ID must be a number.")
if index < 0:
die("Validation ID must be a positive number.")
try:
validation_path = validations()[index]
except IndexError:
die("Invalid validation ID.")
sys.exit(1)
print "Running validation '%s'" % validation_path
def unknown_command(*args):
die("Unknown command")
if __name__ == '__main__':
if len(sys.argv) <= 1:
die("You must enter a command")
command = sys.argv[1]
command_fn = globals().get('command_%s' % command, unknown_command)
command_fn(*sys.argv[2:])
|
<commit_before><commit_msg>Add a basic runner program
It just lists validations right now. Ultimately it will run them and
potentially provide a HTTP API.<commit_after>
|
#!/usr/bin/env python
import sys
import glob
def die(msg):
print msg
sys.exit(1)
def validations():
validations = glob.glob('playbooks/*.yaml')
return list(sorted(validations))
def command_list(**args):
for i, name in enumerate(validations()):
print "%d. %s" % (i + 1, name)
def command_run(*args):
if len(args) != 1:
die("You must pass one argument: the validation ID.")
try:
index = int(args[0]) - 1
except ValueError:
die("Validation ID must be a number.")
if index < 0:
die("Validation ID must be a positive number.")
try:
validation_path = validations()[index]
except IndexError:
die("Invalid validation ID.")
sys.exit(1)
print "Running validation '%s'" % validation_path
def unknown_command(*args):
die("Unknown command")
if __name__ == '__main__':
if len(sys.argv) <= 1:
die("You must enter a command")
command = sys.argv[1]
command_fn = globals().get('command_%s' % command, unknown_command)
command_fn(*sys.argv[2:])
|
Add a basic runner program
It just lists validations right now. Ultimately it will run them and
potentially provide a HTTP API.#!/usr/bin/env python
import sys
import glob
def die(msg):
print msg
sys.exit(1)
def validations():
validations = glob.glob('playbooks/*.yaml')
return list(sorted(validations))
def command_list(**args):
for i, name in enumerate(validations()):
print "%d. %s" % (i + 1, name)
def command_run(*args):
if len(args) != 1:
die("You must pass one argument: the validation ID.")
try:
index = int(args[0]) - 1
except ValueError:
die("Validation ID must be a number.")
if index < 0:
die("Validation ID must be a positive number.")
try:
validation_path = validations()[index]
except IndexError:
die("Invalid validation ID.")
sys.exit(1)
print "Running validation '%s'" % validation_path
def unknown_command(*args):
die("Unknown command")
if __name__ == '__main__':
if len(sys.argv) <= 1:
die("You must enter a command")
command = sys.argv[1]
command_fn = globals().get('command_%s' % command, unknown_command)
command_fn(*sys.argv[2:])
|
<commit_before><commit_msg>Add a basic runner program
It just lists validations right now. Ultimately it will run them and
potentially provide a HTTP API.<commit_after>#!/usr/bin/env python
import sys
import glob
def die(msg):
print msg
sys.exit(1)
def validations():
validations = glob.glob('playbooks/*.yaml')
return list(sorted(validations))
def command_list(**args):
for i, name in enumerate(validations()):
print "%d. %s" % (i + 1, name)
def command_run(*args):
if len(args) != 1:
die("You must pass one argument: the validation ID.")
try:
index = int(args[0]) - 1
except ValueError:
die("Validation ID must be a number.")
if index < 0:
die("Validation ID must be a positive number.")
try:
validation_path = validations()[index]
except IndexError:
die("Invalid validation ID.")
sys.exit(1)
print "Running validation '%s'" % validation_path
def unknown_command(*args):
die("Unknown command")
if __name__ == '__main__':
if len(sys.argv) <= 1:
die("You must enter a command")
command = sys.argv[1]
command_fn = globals().get('command_%s' % command, unknown_command)
command_fn(*sys.argv[2:])
|
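As the commit message says, the runner only lists and selects validations at this point. One plausible way the run command could later execute the selected playbook, not part of the committed code and assuming ansible-playbook is available on PATH:
import subprocess
import sys
def run_validation(validation_path):
    # Run the playbook and propagate its exit status to the caller.
    rc = subprocess.call(['ansible-playbook', validation_path])
    sys.exit(rc)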
|
d3cac0f637a8667497ff311fc94bdceb19330b77
|
plugins/modules/dedicated_server_monitoring.py
|
plugins/modules/dedicated_server_monitoring.py
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_monitoring
short_description: Enable or disable ovh monitoring on a dedicated server
description:
- Enable or disable ovh monitoring on a dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
serviceName:
required: true
description: The serviceName
state:
required: true
description: Indicate the desired state of monitoring
choices:
- present
- absent
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_monitoring:
serviceName: "{{ serviceName }}"
state: "present"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
serviceName=dict(required=True),
state=dict(choices=['present', 'absent'], default='present')
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
serviceName = module.params['serviceName']
state = module.params['state']
if state == 'present':
monitoring_bool = True
elif state == 'absent':
monitoring_bool = False
if module.check_mode:
module.exit_json(msg="Monitoring is now {} for {} - (dry run mode)".format(state, serviceName), changed=True)
try:
server_state = client.get('/dedicated/server/%s' % serviceName)
if server_state['monitoring'] == monitoring_bool:
module.exit_json(msg="Monitoring is already {} on {}".format(state, serviceName), changed=False)
client.put('/dedicated/server/%s' % serviceName, monitoring=monitoring_bool)
module.exit_json(msg="Monitoring is now {} on {}".format(state, serviceName), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error), changed=False)
def main():
run_module()
if __name__ == '__main__':
main()
|
Add dedicated server monitoring module
|
INFRA-6746: Add dedicated server monitoring module
- Then you can add or remove OVH monitoring
|
Python
|
mit
|
synthesio/infra-ovh-ansible-module
|
INFRA-6746: Add dedicated server monitoring module
- Then you can add or remove OVH monitoring
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_monitoring
short_description: Enable or disable ovh monitoring on a dedicated server
description:
- Enable or disable ovh monitoring on a dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
serviceName:
required: true
description: The serviceName
state:
required: true
description: Indicate the desired state of monitoring
choices:
- present
- absent
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_monitoring:
serviceName: "{{ serviceName }}"
state: "present"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
serviceName=dict(required=True),
state=dict(choices=['present', 'absent'], default='present')
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
serviceName = module.params['serviceName']
state = module.params['state']
if state == 'present':
monitoring_bool = True
elif state == 'absent':
monitoring_bool = False
if module.check_mode:
module.exit_json(msg="Monitoring is now {} for {} - (dry run mode)".format(state, serviceName), changed=True)
try:
server_state = client.get('/dedicated/server/%s' % serviceName)
if server_state['monitoring'] == monitoring_bool:
module.exit_json(msg="Monitoring is already {} on {}".format(state, serviceName), changed=False)
client.put('/dedicated/server/%s' % serviceName, monitoring=monitoring_bool)
module.exit_json(msg="Monitoring is now {} on {}".format(state, serviceName), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error), changed=False)
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>INFRA-6746: Add dedicated server monitoring module
- Then you can add or remove OVH monitoring<commit_after>
|
#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_monitoring
short_description: Enable or disable ovh monitoring on a dedicated server
description:
- Enable or disable ovh monitoring on a dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
serviceName:
required: true
description: The serviceName
state:
required: true
description: Indicate the desired state of monitoring
choices:
- present
- absent
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_monitoring:
serviceName: "{{ serviceName }}"
state: "present"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
serviceName=dict(required=True),
state=dict(choices=['present', 'absent'], default='present')
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
serviceName = module.params['serviceName']
state = module.params['state']
if state == 'present':
monitoring_bool = True
elif state == 'absent':
monitoring_bool = False
if module.check_mode:
module.exit_json(msg="Monitoring is now {} for {} - (dry run mode)".format(state, serviceName), changed=True)
try:
server_state = client.get('/dedicated/server/%s' % serviceName)
if server_state['monitoring'] == monitoring_bool:
module.exit_json(msg="Monitoring is already {} on {}".format(state, serviceName), changed=False)
client.put('/dedicated/server/%s' % serviceName, monitoring=monitoring_bool)
module.exit_json(msg="Monitoring is now {} on {}".format(state, serviceName), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error), changed=False)
def main():
run_module()
if __name__ == '__main__':
main()
|
INFRA-6746: Add dedicated server monitoring module
- Then you can add or remove OVH monitoring#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_monitoring
short_description: Enable or disable ovh monitoring on a dedicated server
description:
- Enable or disable ovh monitoring on a dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
serviceName:
required: true
description: The serviceName
state:
required: true
description: Indicate the desired state of monitoring
choices:
- present
- absent
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_monitoring:
serviceName: "{{ serviceName }}"
state: "present"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
serviceName=dict(required=True),
state=dict(choices=['present', 'absent'], default='present')
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
serviceName = module.params['serviceName']
state = module.params['state']
if state == 'present':
monitoring_bool = True
elif state == 'absent':
monitoring_bool = False
if module.check_mode:
module.exit_json(msg="Monitoring is now {} for {} - (dry run mode)".format(state, serviceName), changed=True)
try:
server_state = client.get('/dedicated/server/%s' % serviceName)
if server_state['monitoring'] == monitoring_bool:
module.exit_json(msg="Monitoring is already {} on {}".format(state, serviceName), changed=False)
client.put('/dedicated/server/%s' % serviceName, monitoring=monitoring_bool)
module.exit_json(msg="Monitoring is now {} on {}".format(state, serviceName), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error), changed=False)
def main():
run_module()
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>INFRA-6746: Add dedicated server monitoring module
- Then you can add or remove OVH monitoring<commit_after>#!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
DOCUMENTATION = '''
---
module: dedicated_server_monitoring
short_description: Enable or disable ovh monitoring on a dedicated server
description:
- Enable or disable ovh monitoring on a dedicated server
author: Synthesio SRE Team
requirements:
- ovh >= 0.5.0
options:
serviceName:
required: true
description: The serviceName
state:
required: true
description: Indicate the desired state of monitoring
choices:
- present
- absent
'''
EXAMPLES = '''
synthesio.ovh.dedicated_server_monitoring:
serviceName: "{{ serviceName }}"
state: "present"
delegate_to: localhost
'''
RETURN = ''' # '''
from ansible_collections.synthesio.ovh.plugins.module_utils.ovh import ovh_api_connect, ovh_argument_spec
try:
from ovh.exceptions import APIError
HAS_OVH = True
except ImportError:
HAS_OVH = False
def run_module():
module_args = ovh_argument_spec()
module_args.update(dict(
serviceName=dict(required=True),
state=dict(choices=['present', 'absent'], default='present')
))
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
client = ovh_api_connect(module)
serviceName = module.params['serviceName']
state = module.params['state']
if state == 'present':
monitoring_bool = True
elif state == 'absent':
monitoring_bool = False
if module.check_mode:
module.exit_json(msg="Monitoring is now {} for {} - (dry run mode)".format(state, serviceName), changed=True)
try:
server_state = client.get('/dedicated/server/%s' % serviceName)
if server_state['monitoring'] == monitoring_bool:
module.exit_json(msg="Monitoring is already {} on {}".format(state, serviceName), changed=False)
client.put('/dedicated/server/%s' % serviceName, monitoring=monitoring_bool)
module.exit_json(msg="Monitoring is now {} on {}".format(state, serviceName), changed=True)
except APIError as api_error:
module.fail_json(msg="Failed to call OVH API: {0}".format(api_error), changed=False)
def main():
run_module()
if __name__ == '__main__':
main()
|
|
9080fae1e329a541102ec31fbf7a93dab36c891b
|
scripts/migrate_logs_branded_preprints.py
|
scripts/migrate_logs_branded_preprints.py
|
import sys
import logging
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for log in records:
provider = PreprintService.find_one(Q('node', 'eq', log.params.get('node'))).provider
logger.info(
'Migrating log - {} - to add Provider: {}, '.format(log._id, provider._id)
)
if not dry:
log.params['preprint_provider'] = provider._id
log.save()
count += 1
logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', count))
def get_targets():
return NodeLog.find(
Q('action', 'eq', 'preprint_initiated') &
Q('params.preprint_provider', 'exists', False)
)
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
Add node log migration script for branded preprints
|
Add node log migration script for branded preprints
|
Python
|
apache-2.0
|
baylee-d/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,brianjgeiger/osf.io,chennan47/osf.io,chrisseto/osf.io,rdhyee/osf.io,caneruguz/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,alexschiller/osf.io,mluo613/osf.io,caseyrollins/osf.io,binoculars/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,hmoco/osf.io,felliott/osf.io,icereval/osf.io,alexschiller/osf.io,leb2dg/osf.io,TomBaxter/osf.io,binoculars/osf.io,baylee-d/osf.io,leb2dg/osf.io,alexschiller/osf.io,mattclark/osf.io,Johnetordoff/osf.io,TomBaxter/osf.io,aaxelb/osf.io,Nesiehr/osf.io,monikagrabowska/osf.io,CenterForOpenScience/osf.io,chrisseto/osf.io,aaxelb/osf.io,cslzchen/osf.io,brianjgeiger/osf.io,felliott/osf.io,hmoco/osf.io,monikagrabowska/osf.io,cwisecarver/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,CenterForOpenScience/osf.io,HalcyonChimera/osf.io,adlius/osf.io,Johnetordoff/osf.io,erinspace/osf.io,erinspace/osf.io,felliott/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,chrisseto/osf.io,mluo613/osf.io,hmoco/osf.io,crcresearch/osf.io,caneruguz/osf.io,icereval/osf.io,chennan47/osf.io,adlius/osf.io,mluo613/osf.io,adlius/osf.io,icereval/osf.io,saradbowman/osf.io,Johnetordoff/osf.io,acshi/osf.io,pattisdr/osf.io,mfraezz/osf.io,caneruguz/osf.io,rdhyee/osf.io,pattisdr/osf.io,hmoco/osf.io,crcresearch/osf.io,mfraezz/osf.io,felliott/osf.io,laurenrevere/osf.io,Nesiehr/osf.io,aaxelb/osf.io,laurenrevere/osf.io,caneruguz/osf.io,acshi/osf.io,caseyrollins/osf.io,cslzchen/osf.io,chrisseto/osf.io,monikagrabowska/osf.io,Nesiehr/osf.io,crcresearch/osf.io,adlius/osf.io,baylee-d/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,cslzchen/osf.io,erinspace/osf.io,leb2dg/osf.io,monikagrabowska/osf.io,alexschiller/osf.io,TomBaxter/osf.io,binoculars/osf.io,saradbowman/osf.io,acshi/osf.io,cwisecarver/osf.io,alexschiller/osf.io,mfraezz/osf.io,acshi/osf.io,sloria/osf.io,aaxelb/osf.io,mfraezz/osf.io,brianjgeiger/osf.io,rdhyee/osf.io,rdhyee/osf.io,acshi/osf.io,cwisecarver/osf.io,sloria/osf.io,brianjgeiger/osf.io,mattclark/osf.io,CenterForOpenScience/osf.io,chennan47/osf.io,mluo613/osf.io,mattclark/osf.io,mluo613/osf.io
|
Add node log migration script for branded preprints
|
import sys
import logging
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for log in records:
provider = PreprintService.find_one(Q('node', 'eq', log.params.get('node'))).provider
logger.info(
'Migrating log - {} - to add Provider: {}, '.format(log._id, provider._id)
)
if not dry:
log.params['preprint_provider'] = provider._id
log.save()
count += 1
logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', count))
def get_targets():
return NodeLog.find(
Q('action', 'eq', 'preprint_initiated') &
Q('params.preprint_provider', 'exists', False)
)
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add node log migration script for branded preprints<commit_after>
|
import sys
import logging
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for log in records:
provider = PreprintService.find_one(Q('node', 'eq', log.params.get('node'))).provider
logger.info(
'Migrating log - {} - to add Provider: {}, '.format(log._id, provider._id)
)
if not dry:
log.params['preprint_provider'] = provider._id
log.save()
count += 1
logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', count))
def get_targets():
return NodeLog.find(
Q('action', 'eq', 'preprint_initiated') &
Q('params.preprint_provider', 'exists', False)
)
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
Add node log migration script for branded preprintsimport sys
import logging
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for log in records:
provider = PreprintService.find_one(Q('node', 'eq', log.params.get('node'))).provider
logger.info(
'Migrating log - {} - to add Provider: {}, '.format(log._id, provider._id)
)
if not dry:
log.params['preprint_provider'] = provider._id
log.save()
count += 1
logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', count))
def get_targets():
return NodeLog.find(
Q('action', 'eq', 'preprint_initiated') &
Q('params.preprint_provider', 'exists', False)
)
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add node log migration script for branded preprints<commit_after>import sys
import logging
from website.app import init_app
from website.models import NodeLog, PreprintService
from scripts import utils as script_utils
from modularodm import Q
logger = logging.getLogger(__name__)
def do_migration(records, dry=False):
count = 0
for log in records:
provider = PreprintService.find_one(Q('node', 'eq', log.params.get('node'))).provider
logger.info(
'Migrating log - {} - to add Provider: {}, '.format(log._id, provider._id)
)
if not dry:
log.params['preprint_provider'] = provider._id
log.save()
count += 1
logger.info('{}Migrated {} logs'.format('[dry]'if dry else '', count))
def get_targets():
return NodeLog.find(
Q('action', 'eq', 'preprint_initiated') &
Q('params.preprint_provider', 'exists', False)
)
def main():
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
do_migration(get_targets(), dry)
if __name__ == '__main__':
main()
|
|
a5efa5bd91cbbb2e963f48e4b8c2c371da02a6ec
|
readthedocs/core/migrations/0002_make_userprofile_user_a_onetoonefield.py
|
readthedocs/core/migrations/0002_make_userprofile_user_a_onetoonefield.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
]
|
Add migration for UserProfile.user's change to OneToOneField
|
Add migration for UserProfile.user's change to OneToOneField
|
Python
|
mit
|
gjtorikian/readthedocs.org,kdkeyser/readthedocs.org,titiushko/readthedocs.org,LukasBoersma/readthedocs.org,SteveViss/readthedocs.org,rtfd/readthedocs.org,techtonik/readthedocs.org,soulshake/readthedocs.org,royalwang/readthedocs.org,sunnyzwh/readthedocs.org,michaelmcandrew/readthedocs.org,wijerasa/readthedocs.org,clarkperkins/readthedocs.org,singingwolfboy/readthedocs.org,kenwang76/readthedocs.org,techtonik/readthedocs.org,fujita-shintaro/readthedocs.org,istresearch/readthedocs.org,kdkeyser/readthedocs.org,davidfischer/readthedocs.org,safwanrahman/readthedocs.org,wijerasa/readthedocs.org,gjtorikian/readthedocs.org,michaelmcandrew/readthedocs.org,clarkperkins/readthedocs.org,royalwang/readthedocs.org,soulshake/readthedocs.org,tddv/readthedocs.org,gjtorikian/readthedocs.org,CedarLogic/readthedocs.org,attakei/readthedocs-oauth,hach-que/readthedocs.org,stevepiercy/readthedocs.org,davidfischer/readthedocs.org,CedarLogic/readthedocs.org,fujita-shintaro/readthedocs.org,tddv/readthedocs.org,soulshake/readthedocs.org,VishvajitP/readthedocs.org,rtfd/readthedocs.org,pombredanne/readthedocs.org,hach-que/readthedocs.org,jerel/readthedocs.org,GovReady/readthedocs.org,michaelmcandrew/readthedocs.org,atsuyim/readthedocs.org,LukasBoersma/readthedocs.org,espdev/readthedocs.org,rtfd/readthedocs.org,fujita-shintaro/readthedocs.org,michaelmcandrew/readthedocs.org,Tazer/readthedocs.org,CedarLogic/readthedocs.org,sid-kap/readthedocs.org,SteveViss/readthedocs.org,raven47git/readthedocs.org,jerel/readthedocs.org,asampat3090/readthedocs.org,singingwolfboy/readthedocs.org,techtonik/readthedocs.org,singingwolfboy/readthedocs.org,raven47git/readthedocs.org,sid-kap/readthedocs.org,safwanrahman/readthedocs.org,kenwang76/readthedocs.org,wanghaven/readthedocs.org,istresearch/readthedocs.org,sid-kap/readthedocs.org,LukasBoersma/readthedocs.org,kenshinthebattosai/readthedocs.org,sid-kap/readthedocs.org,hach-que/readthedocs.org,Tazer/readthedocs.org,kenwang76/readthedocs.org,raven47git/readthedocs.org,gjtorikian/readthedocs.org,istresearch/readthedocs.org,soulshake/readthedocs.org,atsuyim/readthedocs.org,kdkeyser/readthedocs.org,kdkeyser/readthedocs.org,safwanrahman/readthedocs.org,techtonik/readthedocs.org,clarkperkins/readthedocs.org,laplaceliu/readthedocs.org,wijerasa/readthedocs.org,VishvajitP/readthedocs.org,espdev/readthedocs.org,sunnyzwh/readthedocs.org,VishvajitP/readthedocs.org,asampat3090/readthedocs.org,mhils/readthedocs.org,emawind84/readthedocs.org,kenshinthebattosai/readthedocs.org,GovReady/readthedocs.org,Tazer/readthedocs.org,stevepiercy/readthedocs.org,wanghaven/readthedocs.org,Tazer/readthedocs.org,attakei/readthedocs-oauth,espdev/readthedocs.org,jerel/readthedocs.org,GovReady/readthedocs.org,atsuyim/readthedocs.org,emawind84/readthedocs.org,emawind84/readthedocs.org,atsuyim/readthedocs.org,CedarLogic/readthedocs.org,espdev/readthedocs.org,pombredanne/readthedocs.org,laplaceliu/readthedocs.org,hach-que/readthedocs.org,stevepiercy/readthedocs.org,titiushko/readthedocs.org,singingwolfboy/readthedocs.org,stevepiercy/readthedocs.org,davidfischer/readthedocs.org,fujita-shintaro/readthedocs.org,LukasBoersma/readthedocs.org,clarkperkins/readthedocs.org,SteveViss/readthedocs.org,raven47git/readthedocs.org,rtfd/readthedocs.org,kenwang76/readthedocs.org,espdev/readthedocs.org,davidfischer/readthedocs.org,SteveViss/readthedocs.org,pombredanne/readthedocs.org,safwanrahman/readthedocs.org,laplaceliu/readthedocs.org,wanghaven/readthedocs.org,jerel/readthedocs.org,titiushko/readthedocs.org,kenshinthebattosai/readthedocs.org
,istresearch/readthedocs.org,GovReady/readthedocs.org,mhils/readthedocs.org,royalwang/readthedocs.org,asampat3090/readthedocs.org,titiushko/readthedocs.org,VishvajitP/readthedocs.org,laplaceliu/readthedocs.org,kenshinthebattosai/readthedocs.org,wijerasa/readthedocs.org,attakei/readthedocs-oauth,mhils/readthedocs.org,royalwang/readthedocs.org,tddv/readthedocs.org,sunnyzwh/readthedocs.org,sunnyzwh/readthedocs.org,asampat3090/readthedocs.org,emawind84/readthedocs.org,mhils/readthedocs.org,attakei/readthedocs-oauth,wanghaven/readthedocs.org
|
Add migration for UserProfile.user's change to OneToOneField
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Add migration for UserProfile.user's change to OneToOneField<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
]
|
Add migration for UserProfile.user's change to OneToOneField# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
]
|
<commit_before><commit_msg>Add migration for UserProfile.user's change to OneToOneField<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='userprofile',
name='user',
field=models.OneToOneField(related_name='profile', verbose_name='User', to=settings.AUTH_USER_MODEL),
),
]
|
|
c600509c9c5069cc27d0c63ae34dc62a80087fa1
|
scripts/rosedu-chat-logs.py
|
scripts/rosedu-chat-logs.py
|
#!/usr/local/bin/python3
import requests
from datetime import datetime, timedelta
url = "http://data.softwareliber.ro/irc-logs/%23rosedu/%23rosedu.{0:02d}-{1:02d}-{2}.log"
now = datetime.now()
yesterday = now - timedelta(hours=24)
ret = []
forbidden_lines = ["has quit", "has joined", "has left"]
for dates in [yesterday, now]:
ret.append("### NEW DAY ###")
resp = requests.get(url.format(dates.day, dates.month, dates.year))
lines = resp.text.split("\n")
for response_line in lines:
if not any(forbidden_line in response_line for forbidden_line in forbidden_lines):
ret.append(response_line)
print("\n".join(ret))
|
Add ROSEdu chat logs script
|
Add ROSEdu chat logs script
|
Python
|
mit
|
smathson/dotfiles
|
Add ROSEdu chat logs script
|
#!/usr/local/bin/python3
import requests
from datetime import datetime, timedelta
url = "http://data.softwareliber.ro/irc-logs/%23rosedu/%23rosedu.{0:02d}-{1:02d}-{2}.log"
now = datetime.now()
yesterday = now - timedelta(hours=24)
ret = []
forbidden_lines = ["has quit", "has joined", "has left"]
for dates in [yesterday, now]:
ret.append("### NEW DAY ###")
resp = requests.get(url.format(dates.day, dates.month, dates.year))
lines = resp.text.split("\n")
for response_line in lines:
if not any(forbidden_line in response_line for forbidden_line in forbidden_lines):
ret.append(response_line)
print("\n".join(ret))
|
<commit_before><commit_msg>Add ROSEdu chat logs script<commit_after>
|
#!/usr/local/bin/python3
import requests
from datetime import datetime, timedelta
url = "http://data.softwareliber.ro/irc-logs/%23rosedu/%23rosedu.{0:02d}-{1:02d}-{2}.log"
now = datetime.now()
yesterday = now - timedelta(hours=24)
ret = []
forbidden_lines = ["has quit", "has joined", "has left"]
for dates in [yesterday, now]:
ret.append("### NEW DAY ###")
resp = requests.get(url.format(dates.day, dates.month, dates.year))
lines = resp.text.split("\n")
for response_line in lines:
if not any(forbidden_line in response_line for forbidden_line in forbidden_lines):
ret.append(response_line)
print("\n".join(ret))
|
Add ROSEdu chat logs script#!/usr/local/bin/python3
import requests
from datetime import datetime, timedelta
url = "http://data.softwareliber.ro/irc-logs/%23rosedu/%23rosedu.{0:02d}-{1:02d}-{2}.log"
now = datetime.now()
yesterday = now - timedelta(hours=24)
ret = []
forbidden_lines = ["has quit", "has joined", "has left"]
for dates in [yesterday, now]:
ret.append("### NEW DAY ###")
resp = requests.get(url.format(dates.day, dates.month, dates.year))
lines = resp.text.split("\n")
for response_line in lines:
if not any(forbidden_line in response_line for forbidden_line in forbidden_lines):
ret.append(response_line)
print("\n".join(ret))
|
<commit_before><commit_msg>Add ROSEdu chat logs script<commit_after>#!/usr/local/bin/python3
import requests
from datetime import datetime, timedelta
url = "http://data.softwareliber.ro/irc-logs/%23rosedu/%23rosedu.{0:02d}-{1:02d}-{2}.log"
now = datetime.now()
yesterday = now - timedelta(hours=24)
ret = []
forbidden_lines = ["has quit", "has joined", "has left"]
for dates in [yesterday, now]:
ret.append("### NEW DAY ###")
resp = requests.get(url.format(dates.day, dates.month, dates.year))
lines = resp.text.split("\n")
for response_line in lines:
if not any(forbidden_line in response_line for forbidden_line in forbidden_lines):
ret.append(response_line)
print("\n".join(ret))
|
|
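A minimal sketch, separate from the script above, of how its fixed [yesterday, now] window could be generalized to the last N days; the URL pattern and line filtering are assumed unchanged:

from datetime import datetime, timedelta


def last_n_days(n):
    # Oldest first, ending with today; last_n_days(2) reproduces [yesterday, now].
    today = datetime.now()
    return [today - timedelta(days=offset) for offset in range(n - 1, -1, -1)]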
ff3a3cd831a70d89864d040c9cc6a71a378a5569
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven', 'argparse'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
Add missing package requirement of argparse.
|
Add missing package requirement of argparse.
|
Python
|
mit
|
ciiol/cron-sentry,sysadmind/cron-sentry
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
Add missing package requirement of argparse.
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven', 'argparse'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
<commit_msg>Add missing package requirement of argparse.<commit_after>
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven', 'argparse'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
Add missing package requirement of argparse.#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven', 'argparse'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
<commit_before>#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
<commit_msg>Add missing package requirement of argparse.<commit_after>#!/usr/bin/env python
from setuptools import setup, find_packages
from cron_sentry.version import VERSION
setup(
name='cron-sentry',
version=VERSION,
author='Yipit Coders',
author_email='coders@yipit.com',
description='Cron-Sentry is a command-line wrapper that reports unsuccessful runs to Sentry (https://www.getsentry.com)',
long_description=open('README.md').read(),
license='MIT',
classifiers=[
'Topic :: Utilities',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
],
url='http://github.com/yipit/cron-sentry',
packages=find_packages(),
install_requires=['raven', 'argparse'],
data_files=[],
entry_points={
'console_scripts': [
# `raven-cron` entry point is for backwards compatibility purposes.
# it should get removed in future releases
'raven-cron = cron_sentry.runner:run',
'cron-sentry = cron_sentry.runner:run',
]
}
)
|
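Since argparse ships with the standard library from Python 2.7/3.2 onward, a hypothetical refinement (not what the commit does) would add the requirement only for older interpreters, for example:

import sys

install_requires = ['raven']
if sys.version_info < (2, 7):
    # argparse is only a third-party package on Python < 2.7.
    install_requires.append('argparse')
# then: setup(..., install_requires=install_requires, ...)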
8e124cf84b400a910e8dfc0c0b2b3fda3b846bd5
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Objects_In_A_Room.py
|
sara_flexbe_states/src/sara_flexbe_states/Wonderland_Objects_In_A_Room.py
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Objects_In_A_Room(EventState):
'''
Return all objects in a room
># json_text string json to read
#> names int[] array containing all names of objects in the room
#> ids id[] array containing all IDs of objects in the room
    <= done return when the room contains at least one object
<= empty return when the room is empty
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Objects_In_A_Room, self).__init__(outcomes=['done', 'empty', 'error'],
input_keys=['json_text'],
output_keys=['ids', 'names'])
def execute(self, userdata):
# parse parameter json data
datas = json.loads(userdata.json_text)
# read if there is data
if not datas:
# continue to Zero
return 'empty'
names = []
ids = []
for data in datas:
# try to read data
if 'id' not in data:
return 'error'
# try to read data
if 'name' not in data:
return 'error'
# write return datas
names += [data['name']]
ids += [data['id']]
userdata.names=names
userdata.ids=ids
print str(names)[1:-1]
print str(ids)[1:-1]
# continue to Done
return 'done'
|
Add a state listing all entities in a room
|
Add a state listing all entities in a room
|
Python
|
bsd-3-clause
|
WalkingMachine/sara_behaviors,WalkingMachine/sara_behaviors
|
Add a state listing all entities in a room
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Objects_In_A_Room(EventState):
'''
Return all objects in a room
># json_text string json to read
#> names int[] array containing all names of objects in the room
#> ids id[] array containing all IDs of objects in the room
    <= done return when the room contains at least one object
<= empty return when the room is empty
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Objects_In_A_Room, self).__init__(outcomes=['done', 'empty', 'error'],
input_keys=['json_text'],
output_keys=['ids', 'names'])
def execute(self, userdata):
# parse parameter json data
datas = json.loads(userdata.json_text)
# read if there is data
if not datas:
# continue to Zero
return 'empty'
names = []
ids = []
for data in datas:
# try to read data
if 'id' not in data:
return 'error'
# try to read data
if 'name' not in data:
return 'error'
# write return datas
names += [data['name']]
ids += [data['id']]
userdata.names=names
userdata.ids=ids
print str(names)[1:-1]
print str(ids)[1:-1]
# continue to Done
return 'done'
|
<commit_before><commit_msg>Add a state listing all entities in a room<commit_after>
|
#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Objects_In_A_Room(EventState):
'''
Return all objects in a room
># json_text string json to read
#> names int[] array containing all names of objects in the room
#> ids id[] array containing all IDs of objects in the room
    <= done return when the room contains at least one object
<= empty return when the room is empty
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Objects_In_A_Room, self).__init__(outcomes=['done', 'empty', 'error'],
input_keys=['json_text'],
output_keys=['ids', 'names'])
def execute(self, userdata):
# parse parameter json data
datas = json.loads(userdata.json_text)
# read if there is data
if not datas:
# continue to Zero
return 'empty'
names = []
ids = []
for data in datas:
# try to read data
if 'id' not in data:
return 'error'
# try to read data
if 'name' not in data:
return 'error'
# write return datas
names += [data['name']]
ids += [data['id']]
userdata.names=names
userdata.ids=ids
print str(names)[1:-1]
print str(ids)[1:-1]
# continue to Done
return 'done'
|
Add a state listing all entities in a room#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Objects_In_A_Room(EventState):
'''
Return all objects in a room
># json_text string json to read
#> names int[] array containing all names of objects in the room
#> ids id[] array containing all IDs of objects in the room
    <= done return when the room contains at least one object
<= empty return when the room is empty
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Objects_In_A_Room, self).__init__(outcomes=['done', 'empty', 'error'],
input_keys=['json_text'],
output_keys=['ids', 'names'])
def execute(self, userdata):
# parse parameter json data
datas = json.loads(userdata.json_text)
# read if there is data
if not datas:
# continue to Zero
return 'empty'
names = []
ids = []
for data in datas:
# try to read data
if 'id' not in data:
return 'error'
# try to read data
if 'name' not in data:
return 'error'
# write return datas
names += [data['name']]
ids += [data['id']]
userdata.names=names
userdata.ids=ids
print str(names)[1:-1]
print str(ids)[1:-1]
# continue to Done
return 'done'
|
<commit_before><commit_msg>Add a state listing all entities in a room<commit_after>#!/usr/bin/env python
# encoding=utf8
from flexbe_core import EventState, Logger
import json
class Wonderland_Objects_In_A_Room(EventState):
'''
Return all objects in a room
># json_text string json to read
#> names int[] array containing all names of objects in the room
#> ids id[] array containing all IDs of objects in the room
    <= done return when the room contains at least one object
<= empty return when the room is empty
<= error return when error reading data
'''
def __init__(self):
# See example_state.py for basic explanations.
super(Wonderland_Objects_In_A_Room, self).__init__(outcomes=['done', 'empty', 'error'],
input_keys=['json_text'],
output_keys=['ids', 'names'])
def execute(self, userdata):
# parse parameter json data
datas = json.loads(userdata.json_text)
# read if there is data
if not datas:
# continue to Zero
return 'empty'
names = []
ids = []
for data in datas:
# try to read data
if 'id' not in data:
return 'error'
# try to read data
if 'name' not in data:
return 'error'
# write return datas
names += [data['name']]
ids += [data['id']]
userdata.names=names
userdata.ids=ids
print str(names)[1:-1]
print str(ids)[1:-1]
# continue to Done
return 'done'
|
|
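A hypothetical example of the json_text userdata this state parses, inferred from the 'id' and 'name' keys it reads; the real Wonderland payload may carry additional fields:

import json

json_text = json.dumps([
    {"id": 3, "name": "kitchen table"},
    {"id": 7, "name": "mug"},
])
# After execute(), userdata.names == ['kitchen table', 'mug'],
# userdata.ids == [3, 7], and the outcome is 'done'.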
59a1d93ca6e0ff20035e2aa9c3ffc83e453f0942
|
set1/challenge-2.py
|
set1/challenge-2.py
|
import base64
def fixed_xor(hex_a, hex_b):
decoded_a = base64.b16decode(hex_a, True)
decoded_b = base64.b16decode(hex_b, True)
xor_result = [chr(ord(a) ^ ord(b)) for a, b in zip(decoded_a, decoded_b)]
return base64.b16encode(''.join(xor_result))
if __name__ == '__main__':
hex_a = raw_input("hex_a> ")
hex_b = raw_input("hex_b> ")
print fixed_xor(hex_a, hex_b)
|
Add solution to challenge 2.
|
Add solution to challenge 2.
|
Python
|
mit
|
ericnorris/cryptopals-solutions
|
Add solution to challenge 2.
|
import base64
def fixed_xor(hex_a, hex_b):
decoded_a = base64.b16decode(hex_a, True)
decoded_b = base64.b16decode(hex_b, True)
xor_result = [chr(ord(a) ^ ord(b)) for a, b in zip(decoded_a, decoded_b)]
return base64.b16encode(''.join(xor_result))
if __name__ == '__main__':
hex_a = raw_input("hex_a> ")
hex_b = raw_input("hex_b> ")
print fixed_xor(hex_a, hex_b)
|
<commit_before><commit_msg>Add solution to challenge 2.<commit_after>
|
import base64
def fixed_xor(hex_a, hex_b):
decoded_a = base64.b16decode(hex_a, True)
decoded_b = base64.b16decode(hex_b, True)
xor_result = [chr(ord(a) ^ ord(b)) for a, b in zip(decoded_a, decoded_b)]
return base64.b16encode(''.join(xor_result))
if __name__ == '__main__':
hex_a = raw_input("hex_a> ")
hex_b = raw_input("hex_b> ")
print fixed_xor(hex_a, hex_b)
|
Add solution to challenge 2.import base64
def fixed_xor(hex_a, hex_b):
decoded_a = base64.b16decode(hex_a, True)
decoded_b = base64.b16decode(hex_b, True)
xor_result = [chr(ord(a) ^ ord(b)) for a, b in zip(decoded_a, decoded_b)]
return base64.b16encode(''.join(xor_result))
if __name__ == '__main__':
hex_a = raw_input("hex_a> ")
hex_b = raw_input("hex_b> ")
print fixed_xor(hex_a, hex_b)
|
<commit_before><commit_msg>Add solution to challenge 2.<commit_after>import base64
def fixed_xor(hex_a, hex_b):
decoded_a = base64.b16decode(hex_a, True)
decoded_b = base64.b16decode(hex_b, True)
xor_result = [chr(ord(a) ^ ord(b)) for a, b in zip(decoded_a, decoded_b)]
return base64.b16encode(''.join(xor_result))
if __name__ == '__main__':
hex_a = raw_input("hex_a> ")
hex_b = raw_input("hex_b> ")
print fixed_xor(hex_a, hex_b)
|
|
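The solution above targets Python 2 (raw_input, print statement). A minimal Python 3 sketch of the same fixed XOR, assuming hex inputs of equal length, would be:

def fixed_xor(hex_a, hex_b):
    a = bytes.fromhex(hex_a)
    b = bytes.fromhex(hex_b)
    # XOR byte by byte; zip truncates to the shorter buffer, as in the original.
    return bytes(x ^ y for x, y in zip(a, b)).hex()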
7f3acc09228460e4494ab7e4899edf3f131efc72
|
build/android/test_runner.py
|
build/android/test_runner.py
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
Add Android test runner script for WebRTC.
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
have been causing headaches for us several times, mostly
because they're tailored to running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: c3e097cdc547d4e25da665cde3ee6167573e8ec2
|
Python
|
bsd-3-clause
|
sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc,sippet/webrtc
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
have been causing headaches for us several times, mostly
because they're tailored to running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: c3e097cdc547d4e25da665cde3ee6167573e8ec2
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
have been causing headaches for us several times, mostly
because they're tailored to running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: c3e097cdc547d4e25da665cde3ee6167573e8ec2<commit_after>
|
#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
have been causing headaches for us several times, mostly
because they're tailored to running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: c3e097cdc547d4e25da665cde3ee6167573e8ec2#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
<commit_before><commit_msg>Add Android test runner script for WebRTC.
The Android test execution toolchain scripts in Chromium
have been causing headaches for us several times, mostly
because they're tailored to running Chrome tests only.
Wrapping their script in our own avoids the pain of
upstreaming new test names to Chromium and rolling them
in to get them running on our bots.
TESTED=Ran a test on a local device using:
webrtc/build/android/test_runner.py gtest -s audio_decoder_unittests --verbose --isolate-file-path webrtc/modules/audio_coding/neteq/audio_decoder_unittests.isolate --release
TBR=phoglund@webrtc.org
Review URL: https://webrtc-codereview.appspot.com/25269004
Cr-Mirrored-From: https://chromium.googlesource.com/external/webrtc
Cr-Mirrored-Commit: c3e097cdc547d4e25da665cde3ee6167573e8ec2<commit_after>#!/usr/bin/env python
# Copyright (c) 2014 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""
Runs tests on Android devices.
This script exists to avoid WebRTC being broken by changes in the Chrome Android
test execution toolchain.
"""
import os
import sys
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.join(SCRIPT_DIR, os.pardir, os.pardir, os.pardir)
CHROMIUM_BUILD_ANDROID_DIR = os.path.join(ROOT_DIR, 'build', 'android')
sys.path.insert(0, CHROMIUM_BUILD_ANDROID_DIR)
import test_runner
from pylib.gtest import gtest_config
def main():
# Override the stable test suites with the WebRTC tests.
gtest_config.STABLE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_capture_tests',
'video_engine_tests',
'video_engine_core_unittests',
'voice_engine_unittests',
'webrtc_perf_tests',
]
return test_runner.main()
if __name__ == '__main__':
sys.exit(main())
|
|
37dee83ed478eba2079bd6eb98c39d7ba2872c7d
|
multigtfs/management/commands/refreshgeometries.py
|
multigtfs/management/commands/refreshgeometries.py
|
#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from multigtfs.models import Feed, Route, Shape, Trip
class Command(BaseCommand):
args = '--all | <feed ID 1> <feed ID 2> ...'
help = 'Updates the cached geometry of GTFS feeds'
option_list = BaseCommand.option_list + (
make_option(
'-a', '--all', action='store_true', dest='all', default=False,
help='update all feeds'),
make_option(
'-q', '--quiet', action='store_false', dest='verbose',
default=True, help="don't print status messages to stdout"),
)
def handle(self, *args, **options):
# Validate the arguments
all_feeds = options.get('all')
verbose = options.get('verbose')
if len(args) == 0 and not all_feeds:
raise CommandError('You must pass in feed ID or --all.')
if len(args) > 0 and all_feeds:
raise CommandError("You can't specify a feeds and --all.")
# Get the feeds
if all_feeds:
feeds = Feed.objects.order_by('id')
else:
feeds = []
feed_ids = [int(a) for a in args]
for feed_id in feed_ids:
try:
feeds.append(Feed.objects.get(id=feed_id))
except Feed.DoesNotExist:
raise CommandError('Feed %s not found' % feed_id)
# Refresh the geometries
for feed in feeds:
if verbose:
self.stdout.write(
"Updating geometries in Feed %s (ID %s)...\n" % (
feed.name, feed.id))
shapes = Shape.objects.in_feed(feed)
trips = Trip.objects.in_feed(feed)
routes = Route.objects.in_feed(feed)
for shape in shapes:
shape.update_geometry(update_parent=False)
for trip in trips:
trip.update_geometry(update_parent=False)
for route in routes:
route.update_geometry()
if verbose:
self.stdout.write(
"Feed %d: Updated geometries in %d shape%s, %d trip%s, and"
" %d route%s." % (
feed.id,
shapes.count(), '' if shapes.count() == 1 else 's',
trips.count(), '' if trips.count() == 1 else 's',
routes.count(), '' if routes.count() == 1 else 's'))
|
Add mgmt command for refreshing cached geometries
|
Add mgmt command for refreshing cached geometries
|
Python
|
apache-2.0
|
tulsawebdevs/django-multi-gtfs,tulsawebdevs/django-multi-gtfs,inmagik/django-multi-gtfs,inmagik/django-multi-gtfs,inmagik/django-multi-gtfs,inmagik/django-multi-gtfs
|
Add mgmt command for refreshing cached geometries
|
#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from multigtfs.models import Feed, Route, Shape, Trip
class Command(BaseCommand):
args = '--all | <feed ID 1> <feed ID 2> ...'
help = 'Updates the cached geometry of GTFS feeds'
option_list = BaseCommand.option_list + (
make_option(
'-a', '--all', action='store_true', dest='all', default=False,
help='update all feeds'),
make_option(
'-q', '--quiet', action='store_false', dest='verbose',
default=True, help="don't print status messages to stdout"),
)
def handle(self, *args, **options):
# Validate the arguments
all_feeds = options.get('all')
verbose = options.get('verbose')
if len(args) == 0 and not all_feeds:
raise CommandError('You must pass in feed ID or --all.')
if len(args) > 0 and all_feeds:
raise CommandError("You can't specify a feeds and --all.")
# Get the feeds
if all_feeds:
feeds = Feed.objects.order_by('id')
else:
feeds = []
feed_ids = [int(a) for a in args]
for feed_id in feed_ids:
try:
feeds.append(Feed.objects.get(id=feed_id))
except Feed.DoesNotExist:
raise CommandError('Feed %s not found' % feed_id)
# Refresh the geometries
for feed in feeds:
if verbose:
self.stdout.write(
"Updating geometries in Feed %s (ID %s)...\n" % (
feed.name, feed.id))
shapes = Shape.objects.in_feed(feed)
trips = Trip.objects.in_feed(feed)
routes = Route.objects.in_feed(feed)
for shape in shapes:
shape.update_geometry(update_parent=False)
for trip in trips:
trip.update_geometry(update_parent=False)
for route in routes:
route.update_geometry()
if verbose:
self.stdout.write(
"Feed %d: Updated geometries in %d shape%s, %d trip%s, and"
" %d route%s." % (
feed.id,
shapes.count(), '' if shapes.count() == 1 else 's',
trips.count(), '' if trips.count() == 1 else 's',
routes.count(), '' if routes.count() == 1 else 's'))
|
<commit_before><commit_msg>Add mgmt command for refreshing cached geometries<commit_after>
|
#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from multigtfs.models import Feed, Route, Shape, Trip
class Command(BaseCommand):
args = '--all | <feed ID 1> <feed ID 2> ...'
help = 'Updates the cached geometry of GTFS feeds'
option_list = BaseCommand.option_list + (
make_option(
'-a', '--all', action='store_true', dest='all', default=False,
help='update all feeds'),
make_option(
'-q', '--quiet', action='store_false', dest='verbose',
default=True, help="don't print status messages to stdout"),
)
def handle(self, *args, **options):
# Validate the arguments
all_feeds = options.get('all')
verbose = options.get('verbose')
if len(args) == 0 and not all_feeds:
raise CommandError('You must pass in feed ID or --all.')
if len(args) > 0 and all_feeds:
raise CommandError("You can't specify a feeds and --all.")
# Get the feeds
if all_feeds:
feeds = Feed.objects.order_by('id')
else:
feeds = []
feed_ids = [int(a) for a in args]
for feed_id in feed_ids:
try:
feeds.append(Feed.objects.get(id=feed_id))
except Feed.DoesNotExist:
raise CommandError('Feed %s not found' % feed_id)
# Refresh the geometries
for feed in feeds:
if verbose:
self.stdout.write(
"Updating geometries in Feed %s (ID %s)...\n" % (
feed.name, feed.id))
shapes = Shape.objects.in_feed(feed)
trips = Trip.objects.in_feed(feed)
routes = Route.objects.in_feed(feed)
for shape in shapes:
shape.update_geometry(update_parent=False)
for trip in trips:
trip.update_geometry(update_parent=False)
for route in routes:
route.update_geometry()
if verbose:
self.stdout.write(
"Feed %d: Updated geometries in %d shape%s, %d trip%s, and"
" %d route%s." % (
feed.id,
shapes.count(), '' if shapes.count() == 1 else 's',
trips.count(), '' if trips.count() == 1 else 's',
routes.count(), '' if routes.count() == 1 else 's'))
|
Add mgmt command for refreshing cached geometries#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from multigtfs.models import Feed, Route, Shape, Trip
class Command(BaseCommand):
args = '--all | <feed ID 1> <feed ID 2> ...'
help = 'Updates the cached geometry of GTFS feeds'
option_list = BaseCommand.option_list + (
make_option(
'-a', '--all', action='store_true', dest='all', default=False,
help='update all feeds'),
make_option(
'-q', '--quiet', action='store_false', dest='verbose',
default=True, help="don't print status messages to stdout"),
)
def handle(self, *args, **options):
# Validate the arguments
all_feeds = options.get('all')
verbose = options.get('verbose')
if len(args) == 0 and not all_feeds:
raise CommandError('You must pass in feed ID or --all.')
if len(args) > 0 and all_feeds:
raise CommandError("You can't specify a feeds and --all.")
# Get the feeds
if all_feeds:
feeds = Feed.objects.order_by('id')
else:
feeds = []
feed_ids = [int(a) for a in args]
for feed_id in feed_ids:
try:
feeds.append(Feed.objects.get(id=feed_id))
except Feed.DoesNotExist:
raise CommandError('Feed %s not found' % feed_id)
# Refresh the geometries
for feed in feeds:
if verbose:
self.stdout.write(
"Updating geometries in Feed %s (ID %s)...\n" % (
feed.name, feed.id))
shapes = Shape.objects.in_feed(feed)
trips = Trip.objects.in_feed(feed)
routes = Route.objects.in_feed(feed)
for shape in shapes:
shape.update_geometry(update_parent=False)
for trip in trips:
trip.update_geometry(update_parent=False)
for route in routes:
route.update_geometry()
if verbose:
self.stdout.write(
"Feed %d: Updated geometries in %d shape%s, %d trip%s, and"
" %d route%s." % (
feed.id,
shapes.count(), '' if shapes.count() == 1 else 's',
trips.count(), '' if trips.count() == 1 else 's',
routes.count(), '' if routes.count() == 1 else 's'))
|
<commit_before><commit_msg>Add mgmt command for refreshing cached geometries<commit_after>#
# Copyright 2012-2014 John Whitlock
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from multigtfs.models import Feed, Route, Shape, Trip
class Command(BaseCommand):
args = '--all | <feed ID 1> <feed ID 2> ...'
help = 'Updates the cached geometry of GTFS feeds'
option_list = BaseCommand.option_list + (
make_option(
'-a', '--all', action='store_true', dest='all', default=False,
help='update all feeds'),
make_option(
'-q', '--quiet', action='store_false', dest='verbose',
default=True, help="don't print status messages to stdout"),
)
def handle(self, *args, **options):
# Validate the arguments
all_feeds = options.get('all')
verbose = options.get('verbose')
if len(args) == 0 and not all_feeds:
raise CommandError('You must pass in feed ID or --all.')
if len(args) > 0 and all_feeds:
raise CommandError("You can't specify a feeds and --all.")
# Get the feeds
if all_feeds:
feeds = Feed.objects.order_by('id')
else:
feeds = []
feed_ids = [int(a) for a in args]
for feed_id in feed_ids:
try:
feeds.append(Feed.objects.get(id=feed_id))
except Feed.DoesNotExist:
raise CommandError('Feed %s not found' % feed_id)
# Refresh the geometries
for feed in feeds:
if verbose:
self.stdout.write(
"Updating geometries in Feed %s (ID %s)...\n" % (
feed.name, feed.id))
shapes = Shape.objects.in_feed(feed)
trips = Trip.objects.in_feed(feed)
routes = Route.objects.in_feed(feed)
for shape in shapes:
shape.update_geometry(update_parent=False)
for trip in trips:
trip.update_geometry(update_parent=False)
for route in routes:
route.update_geometry()
if verbose:
self.stdout.write(
"Feed %d: Updated geometries in %d shape%s, %d trip%s, and"
" %d route%s." % (
feed.id,
shapes.count(), '' if shapes.count() == 1 else 's',
trips.count(), '' if trips.count() == 1 else 's',
routes.count(), '' if routes.count() == 1 else 's'))
|
|
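A hedged usage sketch: besides manage.py, the command can be driven from Python with Django's call_command, assuming multigtfs is installed in the project's apps; option names follow the option_list above:

from django.core.management import call_command

# Equivalent to `manage.py refreshgeometries --all`
call_command('refreshgeometries', all=True)

# Equivalent to `manage.py refreshgeometries 1 2 --quiet`
call_command('refreshgeometries', '1', '2', verbose=False)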
252bbb9c2eb4623c46e6ad5c78ce0299c2ba433b
|
misc/create_test_identifier.py
|
misc/create_test_identifier.py
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Creates an identifier in the same scheme as the ID minter.
Useful for creating test IDs.
ABSOLUTELY NOT FOR PRODUCTION USE.
"""
import random
import string
allowed_chars = [
char
for char in (string.ascii_lowercase + string.digits)
if char not in '0oil1'
]
while True:
x = ''.join(random.choice(allowed_chars) for _ in range(8))
if not x.startswith(tuple(string.digits)):
print(x)
break
|
Check in my script for creating test IDs
|
Check in my script for creating test IDs
[skip ci]
|
Python
|
mit
|
wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api,wellcometrust/platform-api
|
Check in my script for creating test IDs
[skip ci]
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Creates an identifier in the same scheme as the ID minter.
Useful for creating test IDs.
ABSOLUTELY NOT FOR PRODUCTION USE.
"""
import random
import string
allowed_chars = [
char
for char in (string.ascii_lowercase + string.digits)
if char not in '0oil1'
]
while True:
x = ''.join(random.choice(allowed_chars) for _ in range(8))
if not x.startswith(tuple(string.digits)):
print(x)
break
|
<commit_before><commit_msg>Check in my script for creating test IDs
[skip ci]<commit_after>
|
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Creates an identifier in the same scheme as the ID minter.
Useful for creating test IDs.
ABSOLUTELY NOT FOR PRODUCTION USE.
"""
import random
import string
allowed_chars = [
char
for char in (string.ascii_lowercase + string.digits)
if char not in '0oil1'
]
while True:
x = ''.join(random.choice(allowed_chars) for _ in range(8))
if not x.startswith(tuple(string.digits)):
print(x)
break
|
Check in my script for creating test IDs
[skip ci]#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Creates an identifier in the same scheme as the ID minter.
Useful for creating test IDs.
ABSOLUTELY NOT FOR PRODUCTION USE.
"""
import random
import string
allowed_chars = [
char
for char in (string.ascii_lowercase + string.digits)
if char not in '0oil1'
]
while True:
x = ''.join(random.choice(allowed_chars) for _ in range(8))
if not x.startswith(tuple(string.digits)):
print(x)
break
|
<commit_before><commit_msg>Check in my script for creating test IDs
[skip ci]<commit_after>#!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
Creates an identifier in the same scheme as the ID minter.
Useful for creating test IDs.
ABSOLUTELY NOT FOR PRODUCTION USE.
"""
import random
import string
allowed_chars = [
char
for char in (string.ascii_lowercase + string.digits)
if char not in '0oil1'
]
while True:
x = ''.join(random.choice(allowed_chars) for _ in range(8))
if not x.startswith(tuple(string.digits)):
print(x)
break
|
|
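An alternative sketch (not the checked-in script) that avoids the retry loop by drawing the first character from letters only, keeping the same excluded characters:

import random
import string

letters = [c for c in string.ascii_lowercase if c not in 'oil']
rest = [c for c in (string.ascii_lowercase + string.digits) if c not in '0oil1']

identifier = random.choice(letters) + ''.join(random.choice(rest) for _ in range(7))
print(identifier)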
fae3703d64c0f4975612ccc1ba7710b47dfec8b9
|
tests/test_connection.py
|
tests/test_connection.py
|
from django.test import TestCase
from revproxy import connection
class TestOutput(TestCase):
def setUp(self):
self.connection = connection.HTTPConnectionPool.ConnectionCls('example.com')
def test_byte_url(self):
"""Output strings are always byte strings, even using Python 3"""
mock_output = b'mock output'
connection._output(self.connection, mock_output)
self.assertEqual(self.connection._buffer, [mock_output])
def test_host_is_first(self):
"""Make sure the host line is second in the request"""
mock_host_output = b'host: example.com'
for output in [b'GET / HTTP/1.1', b'before', mock_host_output, b'after']:
connection._output(self.connection, output)
self.assertEqual(self.connection._buffer[1], mock_host_output)
|
Add tests for output as bytes and testing that the host line is second
|
Add tests for output as bytes and testing that the host line is second
|
Python
|
mpl-2.0
|
TracyWebTech/django-revproxy,TracyWebTech/django-revproxy
|
Add tests for output as bytes and testing that the host line is second
|
from django.test import TestCase
from revproxy import connection
class TestOutput(TestCase):
def setUp(self):
self.connection = connection.HTTPConnectionPool.ConnectionCls('example.com')
def test_byte_url(self):
"""Output strings are always byte strings, even using Python 3"""
mock_output = b'mock output'
connection._output(self.connection, mock_output)
self.assertEqual(self.connection._buffer, [mock_output])
def test_host_is_first(self):
"""Make sure the host line is second in the request"""
mock_host_output = b'host: example.com'
for output in [b'GET / HTTP/1.1', b'before', mock_host_output, b'after']:
connection._output(self.connection, output)
self.assertEqual(self.connection._buffer[1], mock_host_output)
|
<commit_before><commit_msg>Add tests for output as bytes and testing that the host line is second<commit_after>
|
from django.test import TestCase
from revproxy import connection
class TestOutput(TestCase):
def setUp(self):
self.connection = connection.HTTPConnectionPool.ConnectionCls('example.com')
def test_byte_url(self):
"""Output strings are always byte strings, even using Python 3"""
mock_output = b'mock output'
connection._output(self.connection, mock_output)
self.assertEqual(self.connection._buffer, [mock_output])
def test_host_is_first(self):
"""Make sure the host line is second in the request"""
mock_host_output = b'host: example.com'
for output in [b'GET / HTTP/1.1', b'before', mock_host_output, b'after']:
connection._output(self.connection, output)
self.assertEqual(self.connection._buffer[1], mock_host_output)
|
Add tests for output as bytes and testing that the host line is secondfrom django.test import TestCase
from revproxy import connection
class TestOutput(TestCase):
def setUp(self):
self.connection = connection.HTTPConnectionPool.ConnectionCls('example.com')
def test_byte_url(self):
"""Output strings are always byte strings, even using Python 3"""
mock_output = b'mock output'
connection._output(self.connection, mock_output)
self.assertEqual(self.connection._buffer, [mock_output])
def test_host_is_first(self):
"""Make sure the host line is second in the request"""
mock_host_output = b'host: example.com'
for output in [b'GET / HTTP/1.1', b'before', mock_host_output, b'after']:
connection._output(self.connection, output)
self.assertEqual(self.connection._buffer[1], mock_host_output)
|
<commit_before><commit_msg>Add tests for output as bytes and testing that the host line is second<commit_after>from django.test import TestCase
from revproxy import connection
class TestOutput(TestCase):
def setUp(self):
self.connection = connection.HTTPConnectionPool.ConnectionCls('example.com')
def test_byte_url(self):
"""Output strings are always byte strings, even using Python 3"""
mock_output = b'mock output'
connection._output(self.connection, mock_output)
self.assertEqual(self.connection._buffer, [mock_output])
def test_host_is_first(self):
"""Make sure the host line is second in the request"""
mock_host_output = b'host: example.com'
for output in [b'GET / HTTP/1.1', b'before', mock_host_output, b'after']:
connection._output(self.connection, output)
self.assertEqual(self.connection._buffer[1], mock_host_output)
|
|
1856e8102b709176e46c3751bd21234dd5d1eacc
|
src/nodeconductor_assembly_waldur/packages/tests/unittests/test_admin.py
|
src/nodeconductor_assembly_waldur/packages/tests/unittests/test_admin.py
|
from django.test import TestCase
from .. import factories
from ... import admin, models
class TestPackageComponentForm(TestCase):
def test_package_component_form_is_valid_when_component_price_is_0(self):
data = {
'monthly_price': '0',
'amount': '2',
'type': models.PackageComponent.Types.RAM,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data)
self.assertTrue(form.is_valid())
def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
template = factories.PackageTemplateFactory()
factories.OpenStackPackageFactory(template=template)
instance = template.components.first()
data = {
'monthly_price': '0',
'amount': '2',
'type': instance.type,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data, instance=instance)
self.assertFalse(form.is_valid())
|
Add unit tests for package component form
|
Add unit tests for package component form [WAL-450]
|
Python
|
mit
|
opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur
|
Add unit tests for package component form [WAL-450]
|
from django.test import TestCase
from .. import factories
from ... import admin, models
class TestPackageComponentForm(TestCase):
def test_package_component_form_is_valid_when_component_price_is_0(self):
data = {
'monthly_price': '0',
'amount': '2',
'type': models.PackageComponent.Types.RAM,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data)
self.assertTrue(form.is_valid())
def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
template = factories.PackageTemplateFactory()
factories.OpenStackPackageFactory(template=template)
instance = template.components.first()
data = {
'monthly_price': '0',
'amount': '2',
'type': instance.type,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data, instance=instance)
self.assertFalse(form.is_valid())
|
<commit_before><commit_msg>Add unit tests for package component form [WAL-450]<commit_after>
|
from django.test import TestCase
from .. import factories
from ... import admin, models
class TestPackageComponentForm(TestCase):
def test_package_component_form_is_valid_when_component_price_is_0(self):
data = {
'monthly_price': '0',
'amount': '2',
'type': models.PackageComponent.Types.RAM,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data)
self.assertTrue(form.is_valid())
def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
template = factories.PackageTemplateFactory()
factories.OpenStackPackageFactory(template=template)
instance = template.components.first()
data = {
'monthly_price': '0',
'amount': '2',
'type': instance.type,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data, instance=instance)
self.assertFalse(form.is_valid())
|
Add unit tests for package component form [WAL-450]from django.test import TestCase
from .. import factories
from ... import admin, models
class TestPackageComponentForm(TestCase):
def test_package_component_form_is_valid_when_component_price_is_0(self):
data = {
'monthly_price': '0',
'amount': '2',
'type': models.PackageComponent.Types.RAM,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data)
self.assertTrue(form.is_valid())
def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
template = factories.PackageTemplateFactory()
factories.OpenStackPackageFactory(template=template)
instance = template.components.first()
data = {
'monthly_price': '0',
'amount': '2',
'type': instance.type,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data, instance=instance)
self.assertFalse(form.is_valid())
|
<commit_before><commit_msg>Add unit tests for package component form [WAL-450]<commit_after>from django.test import TestCase
from .. import factories
from ... import admin, models
class TestPackageComponentForm(TestCase):
def test_package_component_form_is_valid_when_component_price_is_0(self):
data = {
'monthly_price': '0',
'amount': '2',
'type': models.PackageComponent.Types.RAM,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data)
self.assertTrue(form.is_valid())
def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
template = factories.PackageTemplateFactory()
factories.OpenStackPackageFactory(template=template)
instance = template.components.first()
data = {
'monthly_price': '0',
'amount': '2',
'type': instance.type,
'price': '9', # price is required but not used in form validation.
}
form = admin.PackageComponentForm(data=data, instance=instance)
self.assertFalse(form.is_valid())
|
|
9f6673afa14dda9c4d32b3b30bd8e4f1a0489269
|
Problem_04_palindrome/problem_4.py
|
Problem_04_palindrome/problem_4.py
|
def is_palindrome(number):
reversed_number = 0
n = number
while(n):
reversed_number = reversed_number * 10 + (n % 10)
#print("reversed_number:", reversed_number)
n //= 10
#print("n:", n)
return number == reversed_number
largest_palindrome = -1
for i in range(100, 1000):
#print("i:", i)
for j in range(100, 1000):
product = i * j
if is_palindrome(product) and product > largest_palindrome:
largest_palindrome = product
print("Palindrome:", product)
|
Solve problem 2 in Python
|
Solve problem 2 in Python
|
Python
|
mit
|
sirodoht/project-euler,sirodoht/project-euler,sirodoht/project-euler
|
Solve problem 2 in Python
|
def is_palindrome(number):
reversed_number = 0
n = number
while(n):
reversed_number = reversed_number * 10 + (n % 10)
#print("reversed_number:", reversed_number)
n //= 10
#print("n:", n)
return number == reversed_number
largest_palindrome = -1
for i in range(100, 1000):
#print("i:", i)
for j in range(100, 1000):
product = i * j
if is_palindrome(product) and product > largest_palindrome:
largest_palindrome = product
print("Palindrome:", product)
|
<commit_before><commit_msg>Solve problem 2 in Python<commit_after>
|
def is_palindrome(number):
reversed_number = 0
n = number
while(n):
reversed_number = reversed_number * 10 + (n % 10)
#print("reversed_number:", reversed_number)
n //= 10
#print("n:", n)
return number == reversed_number
largest_palindrome = -1
for i in range(100, 1000):
#print("i:", i)
for j in range(100, 1000):
product = i * j
if is_palindrome(product) and product > largest_palindrome:
largest_palindrome = product
print("Palindrome:", product)
|
Solve problem 2 in Python
def is_palindrome(number):
reversed_number = 0
n = number
while(n):
reversed_number = reversed_number * 10 + (n % 10)
#print("reversed_number:", reversed_number)
n //= 10
#print("n:", n)
return number == reversed_number
largest_palindrome = -1
for i in range(100, 1000):
#print("i:", i)
for j in range(100, 1000):
product = i * j
if is_palindrome(product) and product > largest_palindrome:
largest_palindrome = product
print("Palindrome:", product)
|
<commit_before><commit_msg>Solve problem 2 in Python<commit_after>def is_palindrome(number):
reversed_number = 0
n = number
while(n):
reversed_number = reversed_number * 10 + (n % 10)
#print("reversed_number:", reversed_number)
n //= 10
#print("n:", n)
return number == reversed_number
largest_palindrome = -1
for i in range(100, 1000):
#print("i:", i)
for j in range(100, 1000):
product = i * j
if is_palindrome(product) and product > largest_palindrome:
largest_palindrome = product
print("Palindrome:", product)
|
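The commit reverses each candidate arithmetically, digit by digit; an equivalent check that many Python solutions use converts the number to a string and compares it with its reverse. The snippet below illustrates that alternative and is not part of the commit above.

def is_palindrome(number):
    # String-based variant of the digit-reversal check used in the commit.
    s = str(number)
    return s == s[::-1]

largest_palindrome = max(i * j
                         for i in range(100, 1000)
                         for j in range(100, 1000)
                         if is_palindrome(i * j))
print("Palindrome:", largest_palindrome)  # Project Euler problem 4 answer: 906609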
|
703de44925dd4fe2d06aa4dcda51b56bad77af2f
|
test/benchmarks/startup/TestStartupDelays.py
|
test/benchmarks/startup/TestStartupDelays.py
|
"""Test lldb's startup delays creating a target and setting a breakpoint."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = os.path.join("benchmarks", "startup")
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 15
@benchmarks_test
def test_startup_delay(self):
"""Test start up delays creating a target and setting a breakpoint."""
print
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print "lldb startup delays benchmark:"
print "create fresh target:", self.stopwatch
print "set first breakpoint:", self.stopwatch2
def run_startup_delays_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a benchmark for measuring start up delays of lldb, including:
|
Add a benchmark for measuring start up delays of lldb, including:
o create a fresh target; and
o set the first breakpoint
Example (using lldb to set a breakpoint on lldb's Driver::MainLoop function):
./dotest.py -v +b -x '-F Driver::MainLoop()' -p TestStartupDelays.py
...
1: test_startup_delay (TestStartupDelays.StartupDelaysBench)
Test start up delays creating a target and setting a breakpoint. ...
lldb startup delays benchmark:
create fresh target: Avg: 0.106732 (Laps: 15, Total Elapsed Time: 1.600985)
set first breakpoint: Avg: 0.102589 (Laps: 15, Total Elapsed Time: 1.538832)
ok
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142628 91177308-0d34-0410-b5e6-96231b3b80d8
|
Python
|
apache-2.0
|
llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb
|
Add a benchmark for measuring start up delays of lldb, including:
o create a fresh target; and
o set the first breakpoint
Example (using lldb to set a breakpoint on lldb's Driver::MainLoop function):
./dotest.py -v +b -x '-F Driver::MainLoop()' -p TestStartupDelays.py
...
1: test_startup_delay (TestStartupDelays.StartupDelaysBench)
Test start up delays creating a target and setting a breakpoint. ...
lldb startup delays benchmark:
create fresh target: Avg: 0.106732 (Laps: 15, Total Elapsed Time: 1.600985)
set first breakpoint: Avg: 0.102589 (Laps: 15, Total Elapsed Time: 1.538832)
ok
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142628 91177308-0d34-0410-b5e6-96231b3b80d8
|
"""Test lldb's startup delays creating a target and setting a breakpoint."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = os.path.join("benchmarks", "startup")
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 15
@benchmarks_test
def test_startup_delay(self):
"""Test start up delays creating a target and setting a breakpoint."""
print
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print "lldb startup delays benchmark:"
print "create fresh target:", self.stopwatch
print "set first breakpoint:", self.stopwatch2
def run_startup_delays_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a benchmark for measuring start up delays of lldb, including:
o create a fresh target; and
o set the first breakpoint
Example (using lldb to set a breakpoint on lldb's Driver::MainLoop function):
./dotest.py -v +b -x '-F Driver::MainLoop()' -p TestStartupDelays.py
...
1: test_startup_delay (TestStartupDelays.StartupDelaysBench)
Test start up delays creating a target and setting a breakpoint. ...
lldb startup delays benchmark:
create fresh target: Avg: 0.106732 (Laps: 15, Total Elapsed Time: 1.600985)
set first breakpoint: Avg: 0.102589 (Laps: 15, Total Elapsed Time: 1.538832)
ok
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142628 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>
|
"""Test lldb's startup delays creating a target and setting a breakpoint."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = os.path.join("benchmarks", "startup")
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 15
@benchmarks_test
def test_startup_delay(self):
"""Test start up delays creating a target and setting a breakpoint."""
print
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print "lldb startup delays benchmark:"
print "create fresh target:", self.stopwatch
print "set first breakpoint:", self.stopwatch2
def run_startup_delays_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
Add a benchmark for measuring start up delays of lldb, including:
o create a fresh target; and
o set the first breakpoint
Example (using lldb to set a breakpoint on lldb's Driver::MainLoop function):
./dotest.py -v +b -x '-F Driver::MainLoop()' -p TestStartupDelays.py
...
1: test_startup_delay (TestStartupDelays.StartupDelaysBench)
Test start up delays creating a target and setting a breakpoint. ...
lldb startup delays benchmark:
create fresh target: Avg: 0.106732 (Laps: 15, Total Elapsed Time: 1.600985)
set first breakpoint: Avg: 0.102589 (Laps: 15, Total Elapsed Time: 1.538832)
ok
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142628 91177308-0d34-0410-b5e6-96231b3b80d8
"""Test lldb's startup delays creating a target and setting a breakpoint."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = os.path.join("benchmarks", "startup")
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 15
@benchmarks_test
def test_startup_delay(self):
"""Test start up delays creating a target and setting a breakpoint."""
print
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print "lldb startup delays benchmark:"
print "create fresh target:", self.stopwatch
print "set first breakpoint:", self.stopwatch2
def run_startup_delays_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
<commit_before><commit_msg>Add a benchmark for measuring start up delays of lldb, including:
o create a fresh target; and
o set the first breakpoint
Example (using lldb to set a breakpoint on lldb's Driver::MainLoop function):
./dotest.py -v +b -x '-F Driver::MainLoop()' -p TestStartupDelays.py
...
1: test_startup_delay (TestStartupDelays.StartupDelaysBench)
Test start up delays creating a target and setting a breakpoint. ...
lldb startup delays benchmark:
create fresh target: Avg: 0.106732 (Laps: 15, Total Elapsed Time: 1.600985)
set first breakpoint: Avg: 0.102589 (Laps: 15, Total Elapsed Time: 1.538832)
ok
git-svn-id: b33bab8abb5b18c12ee100cd7761ab452d00b2b0@142628 91177308-0d34-0410-b5e6-96231b3b80d8<commit_after>"""Test lldb's startup delays creating a target and setting a breakpoint."""
import os, sys
import unittest2
import lldb
import pexpect
from lldbbench import *
class StartupDelaysBench(BenchBase):
mydir = os.path.join("benchmarks", "startup")
def setUp(self):
BenchBase.setUp(self)
# Create self.stopwatch2 for measuring "set first breakpoint".
# The default self.stopwatch is for "create fresh target".
self.stopwatch2 = Stopwatch()
if lldb.bmExecutable:
self.exe = lldb.bmExecutable
else:
self.exe = self.lldbHere
if lldb.bmBreakpointSpec:
self.break_spec = lldb.bmBreakpointSpec
else:
self.break_spec = '-n main'
self.count = lldb.bmIterationCount
if self.count <= 0:
self.count = 15
@benchmarks_test
def test_startup_delay(self):
"""Test start up delays creating a target and setting a breakpoint."""
print
self.run_startup_delays_bench(self.exe, self.break_spec, self.count)
print "lldb startup delays benchmark:"
print "create fresh target:", self.stopwatch
print "set first breakpoint:", self.stopwatch2
def run_startup_delays_bench(self, exe, break_spec, count):
# Set self.child_prompt, which is "(lldb) ".
self.child_prompt = '(lldb) '
prompt = self.child_prompt
        # Reset the stopwatches now.
self.stopwatch.reset()
self.stopwatch2.reset()
for i in range(count):
# So that the child gets torn down after the test.
self.child = pexpect.spawn('%s %s' % (self.lldbHere, self.lldbOption))
child = self.child
# Turn on logging for what the child sends back.
if self.TraceOn():
child.logfile_read = sys.stdout
with self.stopwatch:
# Create a fresh target.
child.sendline('file %s' % exe) # Aka 'target create'.
child.expect_exact(prompt)
with self.stopwatch2:
# Read debug info and set the first breakpoint.
child.sendline('breakpoint set %s' % break_spec)
child.expect_exact(prompt)
child.sendline('quit')
try:
self.child.expect(pexpect.EOF)
except:
pass
# The test is about to end and if we come to here, the child process has
# been terminated. Mark it so.
self.child = None
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
|
|
047697aa92f297c5e9c74ef0bd9eb3702b5fce13
|
bioagents/dtda/generate_tas_dump.py
|
bioagents/dtda/generate_tas_dump.py
|
import os
import pickle
from collections import defaultdict
from indra.sources import tas
from indra.databases import drugbank_client
def normalize_drug(drug):
flags = score_drug(drug)
if 'long_name' in flags:
if 'DRUGBANK' in drug.db_refs:
db_name = \
drugbank_client.get_drugbank_name(drug.db_refs['DRUGBANK'])
if db_name:
drug.name = db_name
def score_drug(drug):
flags = set()
if any(char in drug.name for char in {'(', '[', '{', ','}):
flags.add('has_special_char')
if 'CHEBI' not in drug.db_refs and 'CHEMBL' in drug.db_refs:
flags.add('chembl_not_chebi')
if len(drug.name) > 20:
flags.add('long_name')
if ' ' in drug.name:
flags.add('has_space')
return flags
def choose_best_stmt(stmt_group):
for stmt in stmt_group:
normalize_drug(stmt.subj)
stmts = sorted(stmt_group,
key=lambda x:
(len(score_drug(x.subj)),
len(x.subj.name)))
if len(stmt_group) > 1:
print('Choosing: %s (%s) from' %
(stmts[0].subj, score_drug(stmts[0].subj)))
for stmt in stmts:
print(stmt.subj, score_drug(stmt.subj))
print()
return stmts[0]
if __name__ == '__main__':
tp = tas.process_from_web(affinity_class_limit=2, named_only=True,
standardized_only=False)
grouped = defaultdict(list)
for stmt in tp.statements:
grouped[(stmt.subj.db_refs['LSPCI'], stmt.obj.name)].append(stmt)
opt_stmts = []
for (lspci, obj_name), stmts in grouped.items():
opt_stmt = choose_best_stmt(stmts)
opt_stmts.append(opt_stmt)
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'tas_stmts_filtered.pkl')
with open(fname, 'wb') as fh:
pickle.dump(opt_stmts, fh)
|
Add script to reproduce custom TAS dump
|
Add script to reproduce custom TAS dump
|
Python
|
bsd-2-clause
|
sorgerlab/bioagents,bgyori/bioagents
|
Add script to reproduce custom TAS dump
|
import os
import pickle
from collections import defaultdict
from indra.sources import tas
from indra.databases import drugbank_client
def normalize_drug(drug):
flags = score_drug(drug)
if 'long_name' in flags:
if 'DRUGBANK' in drug.db_refs:
db_name = \
drugbank_client.get_drugbank_name(drug.db_refs['DRUGBANK'])
if db_name:
drug.name = db_name
def score_drug(drug):
flags = set()
if any(char in drug.name for char in {'(', '[', '{', ','}):
flags.add('has_special_char')
if 'CHEBI' not in drug.db_refs and 'CHEMBL' in drug.db_refs:
flags.add('chembl_not_chebi')
if len(drug.name) > 20:
flags.add('long_name')
if ' ' in drug.name:
flags.add('has_space')
return flags
def choose_best_stmt(stmt_group):
for stmt in stmt_group:
normalize_drug(stmt.subj)
stmts = sorted(stmt_group,
key=lambda x:
(len(score_drug(x.subj)),
len(x.subj.name)))
if len(stmt_group) > 1:
print('Choosing: %s (%s) from' %
(stmts[0].subj, score_drug(stmts[0].subj)))
for stmt in stmts:
print(stmt.subj, score_drug(stmt.subj))
print()
return stmts[0]
if __name__ == '__main__':
tp = tas.process_from_web(affinity_class_limit=2, named_only=True,
standardized_only=False)
grouped = defaultdict(list)
for stmt in tp.statements:
grouped[(stmt.subj.db_refs['LSPCI'], stmt.obj.name)].append(stmt)
opt_stmts = []
for (lspci, obj_name), stmts in grouped.items():
opt_stmt = choose_best_stmt(stmts)
opt_stmts.append(opt_stmt)
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'tas_stmts_filtered.pkl')
with open(fname, 'wb') as fh:
pickle.dump(opt_stmts, fh)
|
<commit_before><commit_msg>Add script to reproduce custom TAS dump<commit_after>
|
import os
import pickle
from collections import defaultdict
from indra.sources import tas
from indra.databases import drugbank_client
def normalize_drug(drug):
flags = score_drug(drug)
if 'long_name' in flags:
if 'DRUGBANK' in drug.db_refs:
db_name = \
drugbank_client.get_drugbank_name(drug.db_refs['DRUGBANK'])
if db_name:
drug.name = db_name
def score_drug(drug):
flags = set()
if any(char in drug.name for char in {'(', '[', '{', ','}):
flags.add('has_special_char')
if 'CHEBI' not in drug.db_refs and 'CHEMBL' in drug.db_refs:
flags.add('chembl_not_chebi')
if len(drug.name) > 20:
flags.add('long_name')
if ' ' in drug.name:
flags.add('has_space')
return flags
def choose_best_stmt(stmt_group):
for stmt in stmt_group:
normalize_drug(stmt.subj)
stmts = sorted(stmt_group,
key=lambda x:
(len(score_drug(x.subj)),
len(x.subj.name)))
if len(stmt_group) > 1:
print('Choosing: %s (%s) from' %
(stmts[0].subj, score_drug(stmts[0].subj)))
for stmt in stmts:
print(stmt.subj, score_drug(stmt.subj))
print()
return stmts[0]
if __name__ == '__main__':
tp = tas.process_from_web(affinity_class_limit=2, named_only=True,
standardized_only=False)
grouped = defaultdict(list)
for stmt in tp.statements:
grouped[(stmt.subj.db_refs['LSPCI'], stmt.obj.name)].append(stmt)
opt_stmts = []
for (lspci, obj_name), stmts in grouped.items():
opt_stmt = choose_best_stmt(stmts)
opt_stmts.append(opt_stmt)
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'tas_stmts_filtered.pkl')
with open(fname, 'wb') as fh:
pickle.dump(opt_stmts, fh)
|
Add script to reproduce custom TAS dump
import os
import pickle
from collections import defaultdict
from indra.sources import tas
from indra.databases import drugbank_client
def normalize_drug(drug):
flags = score_drug(drug)
if 'long_name' in flags:
if 'DRUGBANK' in drug.db_refs:
db_name = \
drugbank_client.get_drugbank_name(drug.db_refs['DRUGBANK'])
if db_name:
drug.name = db_name
def score_drug(drug):
flags = set()
if any(char in drug.name for char in {'(', '[', '{', ','}):
flags.add('has_special_char')
if 'CHEBI' not in drug.db_refs and 'CHEMBL' in drug.db_refs:
flags.add('chembl_not_chebi')
if len(drug.name) > 20:
flags.add('long_name')
if ' ' in drug.name:
flags.add('has_space')
return flags
def choose_best_stmt(stmt_group):
for stmt in stmt_group:
normalize_drug(stmt.subj)
stmts = sorted(stmt_group,
key=lambda x:
(len(score_drug(x.subj)),
len(x.subj.name)))
if len(stmt_group) > 1:
print('Choosing: %s (%s) from' %
(stmts[0].subj, score_drug(stmts[0].subj)))
for stmt in stmts:
print(stmt.subj, score_drug(stmt.subj))
print()
return stmts[0]
if __name__ == '__main__':
tp = tas.process_from_web(affinity_class_limit=2, named_only=True,
standardized_only=False)
grouped = defaultdict(list)
for stmt in tp.statements:
grouped[(stmt.subj.db_refs['LSPCI'], stmt.obj.name)].append(stmt)
opt_stmts = []
for (lspci, obj_name), stmts in grouped.items():
opt_stmt = choose_best_stmt(stmts)
opt_stmts.append(opt_stmt)
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'tas_stmts_filtered.pkl')
with open(fname, 'wb') as fh:
pickle.dump(opt_stmts, fh)
|
<commit_before><commit_msg>Add script to reproduce custom TAS dump<commit_after>import os
import pickle
from collections import defaultdict
from indra.sources import tas
from indra.databases import drugbank_client
def normalize_drug(drug):
flags = score_drug(drug)
if 'long_name' in flags:
if 'DRUGBANK' in drug.db_refs:
db_name = \
drugbank_client.get_drugbank_name(drug.db_refs['DRUGBANK'])
if db_name:
drug.name = db_name
def score_drug(drug):
flags = set()
if any(char in drug.name for char in {'(', '[', '{', ','}):
flags.add('has_special_char')
if 'CHEBI' not in drug.db_refs and 'CHEMBL' in drug.db_refs:
flags.add('chembl_not_chebi')
if len(drug.name) > 20:
flags.add('long_name')
if ' ' in drug.name:
flags.add('has_space')
return flags
def choose_best_stmt(stmt_group):
for stmt in stmt_group:
normalize_drug(stmt.subj)
stmts = sorted(stmt_group,
key=lambda x:
(len(score_drug(x.subj)),
len(x.subj.name)))
if len(stmt_group) > 1:
print('Choosing: %s (%s) from' %
(stmts[0].subj, score_drug(stmts[0].subj)))
for stmt in stmts:
print(stmt.subj, score_drug(stmt.subj))
print()
return stmts[0]
if __name__ == '__main__':
tp = tas.process_from_web(affinity_class_limit=2, named_only=True,
standardized_only=False)
grouped = defaultdict(list)
for stmt in tp.statements:
grouped[(stmt.subj.db_refs['LSPCI'], stmt.obj.name)].append(stmt)
opt_stmts = []
for (lspci, obj_name), stmts in grouped.items():
opt_stmt = choose_best_stmt(stmts)
opt_stmts.append(opt_stmt)
fname = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'tas_stmts_filtered.pkl')
with open(fname, 'wb') as fh:
pickle.dump(opt_stmts, fh)
|
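The script only writes the pickle; the code that consumes tas_stmts_filtered.pkl is not part of this record. A minimal example of reading it back might look like the following (the relative path is an assumption based on how the dump is written).

import os
import pickle

fname = os.path.join('resources', 'tas_stmts_filtered.pkl')  # assumed location
with open(fname, 'rb') as fh:
    filtered_stmts = pickle.load(fh)
print('Loaded %d filtered TAS statements' % len(filtered_stmts))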
|
a9af94015d395a674a8ba542b1ba12bc74114f47
|
tests/test_stack/test_queue_two_stacks.py
|
tests/test_stack/test_queue_two_stacks.py
|
import unittest
from aids.stack.queue_two_stacks import QueueUsingTwoStacks
class QueueTwoStacksTestCase(unittest.TestCase):
'''
Unit tests for the Queue data structure implemented using two stacks
'''
def setUp(self):
self.test_queue = QueueUsingTwoStacks()
def test_queue_initialization(self):
self.assertTrue(isinstance(self.test_queue, QueueUsingTwoStacks))
def test_queue_is_empty(self):
self.assertTrue(self.test_queue.is_empty())
def test_queue_enqueue(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def test_queue_dequeue(self):
self.test_queue.enqueue(1)
self.assertEqual(self.test_queue.dequeue(), 1)
def test_queue_len(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def tearDown(self):
pass
|
Add unit tests for queue_two_stacks
|
Add unit tests for queue_two_stacks
|
Python
|
mit
|
ueg1990/aids
|
Add unit tests for queue_two_stacks
|
import unittest
from aids.stack.queue_two_stacks import QueueUsingTwoStacks
class QueueTwoStacksTestCase(unittest.TestCase):
'''
Unit tests for the Queue data structure implemented using two stacks
'''
def setUp(self):
self.test_queue = QueueUsingTwoStacks()
def test_queue_initialization(self):
self.assertTrue(isinstance(self.test_queue, QueueUsingTwoStacks))
def test_queue_is_empty(self):
self.assertTrue(self.test_queue.is_empty())
def test_queue_enqueue(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def test_queue_dequeue(self):
self.test_queue.enqueue(1)
self.assertEqual(self.test_queue.dequeue(), 1)
def test_queue_len(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def tearDown(self):
pass
|
<commit_before><commit_msg>Add unit tests for queue_two_stacks<commit_after>
|
import unittest
from aids.stack.queue_two_stacks import QueueUsingTwoStacks
class QueueTwoStacksTestCase(unittest.TestCase):
'''
Unit tests for the Queue data structure implemented using two stacks
'''
def setUp(self):
self.test_queue = QueueUsingTwoStacks()
def test_queue_initialization(self):
self.assertTrue(isinstance(self.test_queue, QueueUsingTwoStacks))
def test_queue_is_empty(self):
self.assertTrue(self.test_queue.is_empty())
def test_queue_enqueue(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def test_queue_dequeue(self):
self.test_queue.enqueue(1)
self.assertEqual(self.test_queue.dequeue(), 1)
def test_queue_len(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def tearDown(self):
pass
|
Add unit tests for queue_two_stacks
import unittest
from aids.stack.queue_two_stacks import QueueUsingTwoStacks
class QueueTwoStacksTestCase(unittest.TestCase):
'''
Unit tests for the Queue data structure implemented using two stacks
'''
def setUp(self):
self.test_queue = QueueUsingTwoStacks()
def test_queue_initialization(self):
self.assertTrue(isinstance(self.test_queue, QueueUsingTwoStacks))
def test_queue_is_empty(self):
self.assertTrue(self.test_queue.is_empty())
def test_queue_enqueue(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def test_queue_dequeue(self):
self.test_queue.enqueue(1)
self.assertEqual(self.test_queue.dequeue(), 1)
def test_queue_len(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def tearDown(self):
pass
|
<commit_before><commit_msg>Add unit tests for queue_two_stacks<commit_after>import unittest
from aids.stack.queue_two_stacks import QueueUsingTwoStacks
class QueueTwoStacksTestCase(unittest.TestCase):
'''
Unit tests for the Queue data structure implemented using two stacks
'''
def setUp(self):
self.test_queue = QueueUsingTwoStacks()
def test_queue_initialization(self):
self.assertTrue(isinstance(self.test_queue, QueueUsingTwoStacks))
def test_queue_is_empty(self):
self.assertTrue(self.test_queue.is_empty())
def test_queue_enqueue(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def test_queue_dequeue(self):
self.test_queue.enqueue(1)
self.assertEqual(self.test_queue.dequeue(), 1)
def test_queue_len(self):
self.test_queue.enqueue(1)
self.assertEqual(len(self.test_queue), 1)
def tearDown(self):
pass
|
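The class under test, aids.stack.queue_two_stacks.QueueUsingTwoStacks, is not shown in this record. The classic two-stack queue that these tests describe looks roughly like the sketch below; the attribute names are illustrative, and only the public methods the tests exercise (is_empty, enqueue, dequeue, __len__) are taken from the record.

class QueueUsingTwoStacks(object):
    """Sketch of a FIFO queue built from two LIFO stacks (plain lists)."""

    def __init__(self):
        self._inbox = []   # receives newly enqueued items
        self._outbox = []  # serves items in FIFO order

    def is_empty(self):
        return not (self._inbox or self._outbox)

    def enqueue(self, item):
        self._inbox.append(item)

    def dequeue(self):
        if not self._outbox:
            # Move items over only when needed; amortized O(1) per element.
            while self._inbox:
                self._outbox.append(self._inbox.pop())
        return self._outbox.pop()

    def __len__(self):
        return len(self._inbox) + len(self._outbox)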
|
6ec81721055c4a236a4e6f3cd4bac3a116aea01a
|
tests/unit/cloud/clouds/test_qingcloud.py
|
tests/unit/cloud/clouds/test_qingcloud.py
|
import copy
from salt.cloud.clouds import qingcloud
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class QingCloudTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.qingcloud module.
"""
def setUp(self):
self.provider = {
"providers": {
"qingcloud": {
"qingcloud": {
"access_key_id": "key_1234",
"secret_access_key": "1234",
"zone": "test_zone",
"key_filename": "/testfilename",
"driver": "qingcloud",
}
}
}
}
def setup_loader_modules(self):
return {
qingcloud: {
"__opts__": {
"providers": {"qingcloud": {}},
"profiles": {"qingcloud": {}},
},
"__active_provider_name__": "qingcloud:qingcloud",
},
}
def test_qingcloud_verify_ssl(self):
"""
        test qingcloud when using verify_ssl
"""
patch_sig = patch("salt.cloud.clouds.qingcloud._compute_signature", MagicMock())
for verify in [True, False, None]:
mock_requests = MagicMock()
mock_requests.return_value.status_code = 200
mock_requests.return_value.text = '{"ret_code": 0}'
patch_requests = patch("requests.get", mock_requests)
opts = copy.deepcopy(self.provider)
opts["providers"]["qingcloud"]["qingcloud"]["verify_ssl"] = verify
patch_opts = patch.dict(qingcloud.__opts__, opts)
with patch_sig, patch_requests, patch_opts:
ret = qingcloud.query()
self.assertEqual(ret["ret_code"], 0)
self.assertEqual(
mock_requests.call_args_list[0].kwargs["verify"], verify
)
|
Validate qingcloud defaults to verifying SSL
|
Validate qingcloud defaults to verifying SSL
|
Python
|
apache-2.0
|
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
|
Validate qingcloud defaults to verifying SSL
|
import copy
from salt.cloud.clouds import qingcloud
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class QingCloudTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.qingcloud module.
"""
def setUp(self):
self.provider = {
"providers": {
"qingcloud": {
"qingcloud": {
"access_key_id": "key_1234",
"secret_access_key": "1234",
"zone": "test_zone",
"key_filename": "/testfilename",
"driver": "qingcloud",
}
}
}
}
def setup_loader_modules(self):
return {
qingcloud: {
"__opts__": {
"providers": {"qingcloud": {}},
"profiles": {"qingcloud": {}},
},
"__active_provider_name__": "qingcloud:qingcloud",
},
}
def test_qingcloud_verify_ssl(self):
"""
        test qingcloud when using verify_ssl
"""
patch_sig = patch("salt.cloud.clouds.qingcloud._compute_signature", MagicMock())
for verify in [True, False, None]:
mock_requests = MagicMock()
mock_requests.return_value.status_code = 200
mock_requests.return_value.text = '{"ret_code": 0}'
patch_requests = patch("requests.get", mock_requests)
opts = copy.deepcopy(self.provider)
opts["providers"]["qingcloud"]["qingcloud"]["verify_ssl"] = verify
patch_opts = patch.dict(qingcloud.__opts__, opts)
with patch_sig, patch_requests, patch_opts:
ret = qingcloud.query()
self.assertEqual(ret["ret_code"], 0)
self.assertEqual(
mock_requests.call_args_list[0].kwargs["verify"], verify
)
|
<commit_before><commit_msg>Validate qingcloud defaults to verifying SSL<commit_after>
|
import copy
from salt.cloud.clouds import qingcloud
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class QingCloudTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.qingcloud module.
"""
def setUp(self):
self.provider = {
"providers": {
"qingcloud": {
"qingcloud": {
"access_key_id": "key_1234",
"secret_access_key": "1234",
"zone": "test_zone",
"key_filename": "/testfilename",
"driver": "qingcloud",
}
}
}
}
def setup_loader_modules(self):
return {
qingcloud: {
"__opts__": {
"providers": {"qingcloud": {}},
"profiles": {"qingcloud": {}},
},
"__active_provider_name__": "qingcloud:qingcloud",
},
}
def test_qingcloud_verify_ssl(self):
"""
        test qingcloud when using verify_ssl
"""
patch_sig = patch("salt.cloud.clouds.qingcloud._compute_signature", MagicMock())
for verify in [True, False, None]:
mock_requests = MagicMock()
mock_requests.return_value.status_code = 200
mock_requests.return_value.text = '{"ret_code": 0}'
patch_requests = patch("requests.get", mock_requests)
opts = copy.deepcopy(self.provider)
opts["providers"]["qingcloud"]["qingcloud"]["verify_ssl"] = verify
patch_opts = patch.dict(qingcloud.__opts__, opts)
with patch_sig, patch_requests, patch_opts:
ret = qingcloud.query()
self.assertEqual(ret["ret_code"], 0)
self.assertEqual(
mock_requests.call_args_list[0].kwargs["verify"], verify
)
|
Validate qingcloud defaults to verifying SSL
import copy
from salt.cloud.clouds import qingcloud
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class QingCloudTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.qingcloud module.
"""
def setUp(self):
self.provider = {
"providers": {
"qingcloud": {
"qingcloud": {
"access_key_id": "key_1234",
"secret_access_key": "1234",
"zone": "test_zone",
"key_filename": "/testfilename",
"driver": "qingcloud",
}
}
}
}
def setup_loader_modules(self):
return {
qingcloud: {
"__opts__": {
"providers": {"qingcloud": {}},
"profiles": {"qingcloud": {}},
},
"__active_provider_name__": "qingcloud:qingcloud",
},
}
def test_qingcloud_verify_ssl(self):
"""
        test qingcloud when using verify_ssl
"""
patch_sig = patch("salt.cloud.clouds.qingcloud._compute_signature", MagicMock())
for verify in [True, False, None]:
mock_requests = MagicMock()
mock_requests.return_value.status_code = 200
mock_requests.return_value.text = '{"ret_code": 0}'
patch_requests = patch("requests.get", mock_requests)
opts = copy.deepcopy(self.provider)
opts["providers"]["qingcloud"]["qingcloud"]["verify_ssl"] = verify
patch_opts = patch.dict(qingcloud.__opts__, opts)
with patch_sig, patch_requests, patch_opts:
ret = qingcloud.query()
self.assertEqual(ret["ret_code"], 0)
self.assertEqual(
mock_requests.call_args_list[0].kwargs["verify"], verify
)
|
<commit_before><commit_msg>Validate qingcloud defaults to verifying SSL<commit_after>import copy
from salt.cloud.clouds import qingcloud
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.mock import MagicMock, patch
from tests.support.unit import TestCase
class QingCloudTestCase(TestCase, LoaderModuleMockMixin):
"""
Unit TestCase for salt.cloud.clouds.qingcloud module.
"""
def setUp(self):
self.provider = {
"providers": {
"qingcloud": {
"qingcloud": {
"access_key_id": "key_1234",
"secret_access_key": "1234",
"zone": "test_zone",
"key_filename": "/testfilename",
"driver": "qingcloud",
}
}
}
}
def setup_loader_modules(self):
return {
qingcloud: {
"__opts__": {
"providers": {"qingcloud": {}},
"profiles": {"qingcloud": {}},
},
"__active_provider_name__": "qingcloud:qingcloud",
},
}
def test_qingcloud_verify_ssl(self):
"""
        test qingcloud when using verify_ssl
"""
patch_sig = patch("salt.cloud.clouds.qingcloud._compute_signature", MagicMock())
for verify in [True, False, None]:
mock_requests = MagicMock()
mock_requests.return_value.status_code = 200
mock_requests.return_value.text = '{"ret_code": 0}'
patch_requests = patch("requests.get", mock_requests)
opts = copy.deepcopy(self.provider)
opts["providers"]["qingcloud"]["qingcloud"]["verify_ssl"] = verify
patch_opts = patch.dict(qingcloud.__opts__, opts)
with patch_sig, patch_requests, patch_opts:
ret = qingcloud.query()
self.assertEqual(ret["ret_code"], 0)
self.assertEqual(
mock_requests.call_args_list[0].kwargs["verify"], verify
)
|
|
da722b889a2b637bf3895f9f0c5e614deee7e45f
|
scripts/util/add_clifiles.py
|
scripts/util/add_clifiles.py
|
"""Utility script that copies neighboring clifiles when it is discovered
that we need new ones!"""
import psycopg2
import os
import sys
import shutil
def missing_logic(fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
for xoff in [0, -1, 1, -2, 2, -3, 3]:
for yoff in [0, -1, 1, -2, 2, -3, 3]:
lon2 = lon + xoff / 100.0
lat2 = lat + yoff / 100.0
testfn = "cli/%03.0fx%03.0f/%06.2fx%06.2f.cli" % (lon2, lat2,
lon2, lat2)
if not os.path.isfile(testfn):
continue
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
return
print("--> failure for %s" % (fn,))
def main(argv):
"""Go Main Go!"""
scenario = argv[1]
pgconn = psycopg2.connect(database='idep', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
SELECT climate_file from flowpaths where scenario = %s
""", (scenario, ))
for row in cursor:
fn = "/i/%s/%s" % (scenario, row[0])
if os.path.isfile(fn):
continue
missing_logic(fn)
if __name__ == '__main__':
main(sys.argv)
|
Add utility for copying clifiles when new points are needed.
|
Add utility for copying clifiles when new points are needed.
|
Python
|
mit
|
akrherz/idep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/dep,akrherz/dep,akrherz/idep,akrherz/idep,akrherz/idep
|
Add utility for copying clifiles when new points are needed.
|
"""Utility script that copies neighboring clifiles when it is discovered
that we need new ones!"""
import psycopg2
import os
import sys
import shutil
def missing_logic(fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
for xoff in [0, -1, 1, -2, 2, -3, 3]:
for yoff in [0, -1, 1, -2, 2, -3, 3]:
lon2 = lon + xoff / 100.0
lat2 = lat + yoff / 100.0
testfn = "cli/%03.0fx%03.0f/%06.2fx%06.2f.cli" % (lon2, lat2,
lon2, lat2)
if not os.path.isfile(testfn):
continue
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
return
print("--> failure for %s" % (fn,))
def main(argv):
"""Go Main Go!"""
scenario = argv[1]
pgconn = psycopg2.connect(database='idep', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
SELECT climate_file from flowpaths where scenario = %s
""", (scenario, ))
for row in cursor:
fn = "/i/%s/%s" % (scenario, row[0])
if os.path.isfile(fn):
continue
missing_logic(fn)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add utility for copying clifiles when new points are needed.<commit_after>
|
"""Utility script that copies neighboring clifiles when it is discovered
that we need new ones!"""
import psycopg2
import os
import sys
import shutil
def missing_logic(fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
for xoff in [0, -1, 1, -2, 2, -3, 3]:
for yoff in [0, -1, 1, -2, 2, -3, 3]:
lon2 = lon + xoff / 100.0
lat2 = lat + yoff / 100.0
testfn = "cli/%03.0fx%03.0f/%06.2fx%06.2f.cli" % (lon2, lat2,
lon2, lat2)
if not os.path.isfile(testfn):
continue
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
return
print("--> failure for %s" % (fn,))
def main(argv):
"""Go Main Go!"""
scenario = argv[1]
pgconn = psycopg2.connect(database='idep', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
SELECT climate_file from flowpaths where scenario = %s
""", (scenario, ))
for row in cursor:
fn = "/i/%s/%s" % (scenario, row[0])
if os.path.isfile(fn):
continue
missing_logic(fn)
if __name__ == '__main__':
main(sys.argv)
|
Add utility for copying clifiles when new points are needed.
"""Utility script that copies neighboring clifiles when it is discovered
that we need new ones!"""
import psycopg2
import os
import sys
import shutil
def missing_logic(fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
for xoff in [0, -1, 1, -2, 2, -3, 3]:
for yoff in [0, -1, 1, -2, 2, -3, 3]:
lon2 = lon + xoff / 100.0
lat2 = lat + yoff / 100.0
testfn = "cli/%03.0fx%03.0f/%06.2fx%06.2f.cli" % (lon2, lat2,
lon2, lat2)
if not os.path.isfile(testfn):
continue
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
return
print("--> failure for %s" % (fn,))
def main(argv):
"""Go Main Go!"""
scenario = argv[1]
pgconn = psycopg2.connect(database='idep', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
SELECT climate_file from flowpaths where scenario = %s
""", (scenario, ))
for row in cursor:
fn = "/i/%s/%s" % (scenario, row[0])
if os.path.isfile(fn):
continue
missing_logic(fn)
if __name__ == '__main__':
main(sys.argv)
|
<commit_before><commit_msg>Add utility for copying clifiles when new points are needed.<commit_after>"""Utility script that copies neighboring clifiles when it is discovered
that we need new ones!"""
import psycopg2
import os
import sys
import shutil
def missing_logic(fn):
"""Figure out what to do when this filename is missing"""
print("Searching for replacement for '%s'" % (fn,))
lon = float(fn[17:23])
lat = float(fn[24:30])
for xoff in [0, -1, 1, -2, 2, -3, 3]:
for yoff in [0, -1, 1, -2, 2, -3, 3]:
lon2 = lon + xoff / 100.0
lat2 = lat + yoff / 100.0
testfn = "cli/%03.0fx%03.0f/%06.2fx%06.2f.cli" % (lon2, lat2,
lon2, lat2)
if not os.path.isfile(testfn):
continue
print("%s->%s" % (testfn, fn))
shutil.copyfile(testfn, fn)
return
print("--> failure for %s" % (fn,))
def main(argv):
"""Go Main Go!"""
scenario = argv[1]
pgconn = psycopg2.connect(database='idep', host='iemdb', user='nobody')
cursor = pgconn.cursor()
cursor.execute("""
SELECT climate_file from flowpaths where scenario = %s
""", (scenario, ))
for row in cursor:
fn = "/i/%s/%s" % (scenario, row[0])
if os.path.isfile(fn):
continue
missing_logic(fn)
if __name__ == '__main__':
main(sys.argv)
|
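The replacement logic recovers longitude and latitude from fixed character offsets (fn[17:23] and fn[24:30]), which only holds for one specific path layout and scenario width. Purely as an illustration, and not part of the commit, the same values could be pulled out with a regular expression that ignores the prefix entirely.

import re

CLI_NAME = re.compile(r"(\d{3}\.\d{2})x(\d{3}\.\d{2})\.cli$")

def parse_lon_lat(fn):
    """Return (lon, lat) from a climate file path, or None if it does not match."""
    match = CLI_NAME.search(fn)
    if match is None:
        return None
    return float(match.group(1)), float(match.group(2))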
|
1e363121f66d0ef01de6ff83b3dd9167df4d146f
|
tests/RemovingUnitRules/__init__.py
|
tests/RemovingUnitRules/__init__.py
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 24.08.2017 11:32
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add directory for tests of removing unit rules
|
Add directory for tests of removing unit rules
|
Python
|
mit
|
PatrikValkovic/grammpy
|
Add directory for tests of removing unit rules
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 24.08.2017 11:32
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add directory for tests of removing unit rules<commit_after>
|
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 24.08.2017 11:32
:Licence GNUv3
Part of grammpy-transforms
"""
|
Add directory for tests of removing unit rules
#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 24.08.2017 11:32
:Licence GNUv3
Part of grammpy-transforms
"""
|
<commit_before><commit_msg>Add directory for tests of removing unit rules<commit_after>#!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 24.08.2017 11:32
:Licence GNUv3
Part of grammpy-transforms
"""
|
|
e44e50d16726b8517d497f50a276a9851be7fcf0
|
tests/cmd/test_manage.py
|
tests/cmd/test_manage.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import sys
from rally.cmd import manage
from rally import test
class CmdManageTestCase(test.TestCase):
def setUp(self):
super(CmdManageTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch('rally.cmd.manage.db')
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
@mock.patch('rally.cmd.manage.cliutils')
def test_main(self, cli_mock):
manage.main()
categories = {'db': manage.DBCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
|
Add missing tests for rally/cmd/manage.py
|
Add missing tests for rally/cmd/manage.py
Change-Id: Id0e6659b957a346ab7d9aed4ff295e61f7eed515
|
Python
|
apache-2.0
|
cernops/rally,eonpatapon/rally,yeming233/rally,ytsarev/rally,paboldin/rally,vefimova/rally,gluke77/rally,group-policy/rally,aforalee/RRally,yeming233/rally,cernops/rally,varunarya10/rally,go-bears/rally,aplanas/rally,shdowofdeath/rally,vefimova/rally,amit0701/rally,vponomaryov/rally,openstack/rally,group-policy/rally,gluke77/rally,pyKun/rally,vganapath/rally,eayunstack/rally,aplanas/rally,openstack/rally,eayunstack/rally,gluke77/rally,afaheem88/rally,go-bears/rally,vponomaryov/rally,ytsarev/rally,amit0701/rally,pandeyop/rally,group-policy/rally,shdowofdeath/rally,eayunstack/rally,gluke77/rally,pandeyop/rally,vganapath/rally,openstack/rally,eonpatapon/rally,aforalee/RRally,varunarya10/rally,pyKun/rally,paboldin/rally,openstack/rally,vganapath/rally,redhat-openstack/rally,afaheem88/rally,paboldin/rally,amit0701/rally,redhat-openstack/rally,vganapath/rally
|
Add missing tests for rally/cmd/manage.py
Change-Id: Id0e6659b957a346ab7d9aed4ff295e61f7eed515
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import sys
from rally.cmd import manage
from rally import test
class CmdManageTestCase(test.TestCase):
def setUp(self):
super(CmdManageTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch('rally.cmd.manage.db')
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
@mock.patch('rally.cmd.manage.cliutils')
def test_main(self, cli_mock):
manage.main()
categories = {'db': manage.DBCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
|
<commit_before><commit_msg>Add missing tests for rally/cmd/manage.py
Change-Id: Id0e6659b957a346ab7d9aed4ff295e61f7eed515<commit_after>
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import sys
from rally.cmd import manage
from rally import test
class CmdManageTestCase(test.TestCase):
def setUp(self):
super(CmdManageTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch('rally.cmd.manage.db')
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
@mock.patch('rally.cmd.manage.cliutils')
def test_main(self, cli_mock):
manage.main()
categories = {'db': manage.DBCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
|
Add missing tests for rally/cmd/manage.py
Change-Id: Id0e6659b957a346ab7d9aed4ff295e61f7eed515
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import sys
from rally.cmd import manage
from rally import test
class CmdManageTestCase(test.TestCase):
def setUp(self):
super(CmdManageTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch('rally.cmd.manage.db')
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
@mock.patch('rally.cmd.manage.cliutils')
def test_main(self, cli_mock):
manage.main()
categories = {'db': manage.DBCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
|
<commit_before><commit_msg>Add missing tests for rally/cmd/manage.py
Change-Id: Id0e6659b957a346ab7d9aed4ff295e61f7eed515<commit_after># Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import sys
from rally.cmd import manage
from rally import test
class CmdManageTestCase(test.TestCase):
def setUp(self):
super(CmdManageTestCase, self).setUp()
self.db_commands = manage.DBCommands()
@mock.patch('rally.cmd.manage.db')
def test_recreate(self, mock_db):
self.db_commands.recreate()
calls = [mock.call.db_drop(), mock.call.db_create()]
self.assertEqual(calls, mock_db.mock_calls)
@mock.patch('rally.cmd.manage.cliutils')
def test_main(self, cli_mock):
manage.main()
categories = {'db': manage.DBCommands}
cli_mock.run.assert_called_once_with(sys.argv, categories)
|
|
95a53e362eac0b42e9f77bf9dba2b43a71b95e6d
|
vertex/repositories/__init__.py
|
vertex/repositories/__init__.py
|
from grapher import commons, errors
from grapher.repositories.base import EntityRepository
class CachedRepository(EntityRepository):
data = {}
def where(self, skip=0, limit=None, **query):
return []
def create(self, entities):
result = []
for entity in entities:
self.data[entity[self.identity]] = entity
del entity[self.identity]
result.append(entity)
return result
def all(self, skip=0, limit=None):
return commons.CollectionHelper.restore_enumeration(self.data, False)
def delete(self, identities):
result = []
for identity in identities:
result.append(self.data[identity])
del self.data[identity]
return result
def find(self, identities):
try:
return [self.data[i] for i in identities]
except KeyError:
raise errors.NotFoundError(('NOT_FOUND', identities))
def update(self, entities):
result = []
for entity in entities:
identity = entity[self.identity]
del entity[self.identity]
self.data[identity].update(entity)
result.append(self.data[identity])
return result
|
Remove commons.request. Tests are now mocking flask_restful.request module.
|
Remove commons.request.
Tests are now mocking flask_restful.request module.
|
Python
|
mit
|
lucasdavid/vertex
|
Remove commons.request.
Tests are now mocking flask_restful.request module.
|
from grapher import commons, errors
from grapher.repositories.base import EntityRepository
class CachedRepository(EntityRepository):
data = {}
def where(self, skip=0, limit=None, **query):
return []
def create(self, entities):
result = []
for entity in entities:
self.data[entity[self.identity]] = entity
del entity[self.identity]
result.append(entity)
return result
def all(self, skip=0, limit=None):
return commons.CollectionHelper.restore_enumeration(self.data, False)
def delete(self, identities):
result = []
for identity in identities:
result.append(self.data[identity])
del self.data[identity]
return result
def find(self, identities):
try:
return [self.data[i] for i in identities]
except KeyError:
raise errors.NotFoundError(('NOT_FOUND', identities))
def update(self, entities):
result = []
for entity in entities:
identity = entity[self.identity]
del entity[self.identity]
self.data[identity].update(entity)
result.append(self.data[identity])
return result
|
<commit_before><commit_msg> Remove commons.request.
Tests are now mocking flask_restful.request module.<commit_after>
|
from grapher import commons, errors
from grapher.repositories.base import EntityRepository
class CachedRepository(EntityRepository):
data = {}
def where(self, skip=0, limit=None, **query):
return []
def create(self, entities):
result = []
for entity in entities:
self.data[entity[self.identity]] = entity
del entity[self.identity]
result.append(entity)
return result
def all(self, skip=0, limit=None):
return commons.CollectionHelper.restore_enumeration(self.data, False)
def delete(self, identities):
result = []
for identity in identities:
result.append(self.data[identity])
del self.data[identity]
return result
def find(self, identities):
try:
return [self.data[i] for i in identities]
except KeyError:
raise errors.NotFoundError(('NOT_FOUND', identities))
def update(self, entities):
result = []
for entity in entities:
identity = entity[self.identity]
del entity[self.identity]
self.data[identity].update(entity)
result.append(self.data[identity])
return result
|
Remove commons.request.
Tests are now mocking flask_restful.request module.
from grapher import commons, errors
from grapher.repositories.base import EntityRepository
class CachedRepository(EntityRepository):
data = {}
def where(self, skip=0, limit=None, **query):
return []
def create(self, entities):
result = []
for entity in entities:
self.data[entity[self.identity]] = entity
del entity[self.identity]
result.append(entity)
return result
def all(self, skip=0, limit=None):
return commons.CollectionHelper.restore_enumeration(self.data, False)
def delete(self, identities):
result = []
for identity in identities:
result.append(self.data[identity])
del self.data[identity]
return result
def find(self, identities):
try:
return [self.data[i] for i in identities]
except KeyError:
raise errors.NotFoundError(('NOT_FOUND', identities))
def update(self, entities):
result = []
for entity in entities:
identity = entity[self.identity]
del entity[self.identity]
self.data[identity].update(entity)
result.append(self.data[identity])
return result
|
<commit_before><commit_msg> Remove commons.request.
Tests are now mocking flask_restful.request module.<commit_after>from grapher import commons, errors
from grapher.repositories.base import EntityRepository
class CachedRepository(EntityRepository):
data = {}
def where(self, skip=0, limit=None, **query):
return []
def create(self, entities):
result = []
for entity in entities:
self.data[entity[self.identity]] = entity
del entity[self.identity]
result.append(entity)
return result
def all(self, skip=0, limit=None):
return commons.CollectionHelper.restore_enumeration(self.data, False)
def delete(self, identities):
result = []
for identity in identities:
result.append(self.data[identity])
del self.data[identity]
return result
def find(self, identities):
try:
return [self.data[i] for i in identities]
except KeyError:
raise errors.NotFoundError(('NOT_FOUND', identities))
def update(self, entities):
result = []
for entity in entities:
identity = entity[self.identity]
del entity[self.identity]
self.data[identity].update(entity)
result.append(self.data[identity])
return result
|
|
783165571d6fe5b0e655e8c2a4cf7a77e74b3ea1
|
wger/manager/migrations/0007_auto_20160311_2258.py
|
wger/manager/migrations/0007_auto_20160311_2258.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_logs(apps, schema_editor):
'''
Adds a unit to users who have imperial units in the profile
'''
WorkoutLog = apps.get_model('manager', 'WorkoutLog')
UserProfile = apps.get_model('core', 'UserProfile')
for profile in UserProfile.objects.filter(weight_unit='lb'):
WorkoutLog.objects.filter(user=profile.user).update(weight_unit=2)
def convert_settings(apps, schema_editor):
'''
Adds a unit to workout settings that have 99 for 'until failure'
'''
Setting = apps.get_model('manager', 'Setting')
Setting.objects.filter(reps=99).update(reps=1, repetition_unit=2)
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20160303_2138'),
]
operations = [
migrations.RunPython(convert_logs, reverse_code=migrations.RunPython.noop),
migrations.RunPython(convert_settings, reverse_code=migrations.RunPython.noop),
]
|
Add data migration for settings and logs
|
Add data migration for settings and logs
|
Python
|
agpl-3.0
|
wger-project/wger,petervanderdoes/wger,petervanderdoes/wger,kjagoo/wger_stark,DeveloperMal/wger,DeveloperMal/wger,wger-project/wger,rolandgeider/wger,kjagoo/wger_stark,kjagoo/wger_stark,rolandgeider/wger,rolandgeider/wger,DeveloperMal/wger,DeveloperMal/wger,rolandgeider/wger,kjagoo/wger_stark,petervanderdoes/wger,wger-project/wger,wger-project/wger,petervanderdoes/wger
|
Add data migration for settings and logs
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_logs(apps, schema_editor):
'''
Adds a unit to users who have imperial units in the profile
'''
WorkoutLog = apps.get_model('manager', 'WorkoutLog')
UserProfile = apps.get_model('core', 'UserProfile')
for profile in UserProfile.objects.filter(weight_unit='lb'):
WorkoutLog.objects.filter(user=profile.user).update(weight_unit=2)
def convert_settings(apps, schema_editor):
'''
Adds a unit to workout settings that have 99 for 'until failure'
'''
Setting = apps.get_model('manager', 'Setting')
Setting.objects.filter(reps=99).update(reps=1, repetition_unit=2)
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20160303_2138'),
]
operations = [
migrations.RunPython(convert_logs, reverse_code=migrations.RunPython.noop),
migrations.RunPython(convert_settings, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Add data migration for settings and logs<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_logs(apps, schema_editor):
'''
Adds a unit to users who have imperial units in the profile
'''
WorkoutLog = apps.get_model('manager', 'WorkoutLog')
UserProfile = apps.get_model('core', 'UserProfile')
for profile in UserProfile.objects.filter(weight_unit='lb'):
WorkoutLog.objects.filter(user=profile.user).update(weight_unit=2)
def convert_settings(apps, schema_editor):
'''
Adds a unit to workout settings that have 99 for 'until failure'
'''
Setting = apps.get_model('manager', 'Setting')
Setting.objects.filter(reps=99).update(reps=1, repetition_unit=2)
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20160303_2138'),
]
operations = [
migrations.RunPython(convert_logs, reverse_code=migrations.RunPython.noop),
migrations.RunPython(convert_settings, reverse_code=migrations.RunPython.noop),
]
|
Add data migration for settings and logs# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_logs(apps, schema_editor):
'''
Adds a unit to users who have imperial units in the profile
'''
WorkoutLog = apps.get_model('manager', 'WorkoutLog')
UserProfile = apps.get_model('core', 'UserProfile')
for profile in UserProfile.objects.filter(weight_unit='lb'):
WorkoutLog.objects.filter(user=profile.user).update(weight_unit=2)
def convert_settings(apps, schema_editor):
'''
Adds a unit to workout settings that have 99 for 'until failure'
'''
Setting = apps.get_model('manager', 'Setting')
Setting.objects.filter(reps=99).update(reps=1, repetition_unit=2)
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20160303_2138'),
]
operations = [
migrations.RunPython(convert_logs, reverse_code=migrations.RunPython.noop),
migrations.RunPython(convert_settings, reverse_code=migrations.RunPython.noop),
]
|
<commit_before><commit_msg>Add data migration for settings and logs<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def convert_logs(apps, schema_editor):
'''
Adds a unit to users who have imperial units in the profile
'''
WorkoutLog = apps.get_model('manager', 'WorkoutLog')
UserProfile = apps.get_model('core', 'UserProfile')
for profile in UserProfile.objects.filter(weight_unit='lb'):
WorkoutLog.objects.filter(user=profile.user).update(weight_unit=2)
def convert_settings(apps, schema_editor):
'''
Adds a unit to workout settings that have 99 for 'until failure'
'''
Setting = apps.get_model('manager', 'Setting')
Setting.objects.filter(reps=99).update(reps=1, repetition_unit=2)
class Migration(migrations.Migration):
dependencies = [
('manager', '0006_auto_20160303_2138'),
]
operations = [
migrations.RunPython(convert_logs, reverse_code=migrations.RunPython.noop),
migrations.RunPython(convert_settings, reverse_code=migrations.RunPython.noop),
]
|
|
206547027c6390664e498b6c5e00e48aa07a198c
|
libnamebench/config_test.py
|
libnamebench/config_test.py
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing
|
Add some tests for dns config parsing
|
Python
|
apache-2.0
|
somehume/namebench
|
Add some tests for dns config parsing
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>
|
#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
Add some tests for dns config parsing#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
<commit_before><commit_msg>Add some tests for dns config parsing<commit_after>#!/usr/bin/env python
# Copyright 2009 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the config module."""
__author__ = 'tstromberg@google.com (Thomas Stromberg)'
import unittest
import config
class ConfigTest(unittest.TestCase):
def testParseFullLine(self):
line = '129.250.35.251=NTT (2) # y.ns.gin.ntt.net,39.569,-104.8582 (Englewood/CO/US)'
expected = {'name': 'NTT (2)', 'service': 'NTT', 'ip': '129.250.35.251',
'lon': '-104.8582', 'instance': '2', 'country_code': 'US',
'lat': '39.569'}
self.assertEquals(config._ParseServerLine(line), expected)
def testOpenDNSLine(self):
line = '208.67.220.220=OpenDNS # resolver2.opendns.com'
expected = {'name': 'OpenDNS', 'service': 'OpenDNS', 'ip': '208.67.220.220',
'lon': None, 'instance': None, 'country_code': None,
'lat': None}
self.assertEquals(config._ParseServerLine(line), expected)
def testLineWithNoRegion(self):
line = '4.2.2.2=Level/GTEI-2 (3) # vnsc-bak.sys.gtei.net,38.0,-97.0 (US) '
expected = {'name': 'Level/GTEI-2 (3)', 'service': 'Level/GTEI-2',
'ip': '4.2.2.2', 'lon': '-97.0', 'instance': '3',
'country_code': 'US', 'lat': '38.0'}
self.assertEquals(config._ParseServerLine(line), expected)
if __name__ == '__main__':
unittest.main()
|
|
a1ef84da0df661a9371df23a623daf2b51304e89
|
scripts/domain/effects.py
|
scripts/domain/effects.py
|
class Effects:
def attack(self, player, dices):
pass
def defend(self, player, dmg, dodge):
pass
def dispute(self, player, attrName):
pass
|
Add first version of the Effects base class
|
Add first version of the Effects base class
|
Python
|
mit
|
VinGarcia/jit-rpg-system
|
Add first version of the Effects base class
|
class Effects:
def attack(self, player, dices):
pass
def defend(self, player, dmg, dodge):
pass
def dispute(self, player, attrName):
pass
|
<commit_before><commit_msg>Add first version of the Effects base class<commit_after>
|
class Effects:
def attack(self, player, dices):
pass
def defend(self, player, dmg, dodge):
pass
def dispute(self, player, attrName):
pass
|
Add first version of the Effects base class
class Effects:
def attack(self, player, dices):
pass
def defend(self, player, dmg, dodge):
pass
def dispute(self, player, attrName):
pass
|
<commit_before><commit_msg>Add first version of the Effects base class<commit_after>
class Effects:
def attack(self, player, dices):
pass
def defend(self, player, dmg, dodge):
pass
def dispute(self, player, attrName):
pass
|
|
3b467abc665a1807d8a1adbba1be78d40f77b4ce
|
tests/unit/dataactcore/factories/job.py
|
tests/unit/dataactcore/factories/job.py
|
import factory
from factory import fuzzy
from datetime import date, datetime, timezone
from dataactcore.models import jobModels
class SubmissionFactory(factory.Factory):
class Meta:
model = jobModels.Submission
submission_id = None
datetime_utc = fuzzy.FuzzyDateTime(
datetime(2010, 1, 1, tzinfo=timezone.utc))
user_id = fuzzy.FuzzyInteger(1, 9999)
cgac_code = fuzzy.FuzzyText()
reporting_start_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_end_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
reporting_fiscal_period = fuzzy.FuzzyInteger(1, 4)
is_quarter_format = False
publishable = False
number_of_errors = 0
number_of_warnings = 0
|
Add factory for Submission model
|
Add factory for Submission model
|
Python
|
cc0-1.0
|
chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend,fedspendingtransparency/data-act-broker-backend,fedspendingtransparency/data-act-broker-backend,chambers-brian/SIG_Digital-Strategy_SI_ODP_Backend
|
Add factory for Submission model
|
import factory
from factory import fuzzy
from datetime import date, datetime, timezone
from dataactcore.models import jobModels
class SubmissionFactory(factory.Factory):
class Meta:
model = jobModels.Submission
submission_id = None
datetime_utc = fuzzy.FuzzyDateTime(
datetime(2010, 1, 1, tzinfo=timezone.utc))
user_id = fuzzy.FuzzyInteger(1, 9999)
cgac_code = fuzzy.FuzzyText()
reporting_start_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_end_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
reporting_fiscal_period = fuzzy.FuzzyInteger(1, 4)
is_quarter_format = False
publishable = False
number_of_errors = 0
number_of_warnings = 0
|
<commit_before><commit_msg>Add factory for Submission model<commit_after>
|
import factory
from factory import fuzzy
from datetime import date, datetime, timezone
from dataactcore.models import jobModels
class SubmissionFactory(factory.Factory):
class Meta:
model = jobModels.Submission
submission_id = None
datetime_utc = fuzzy.FuzzyDateTime(
datetime(2010, 1, 1, tzinfo=timezone.utc))
user_id = fuzzy.FuzzyInteger(1, 9999)
cgac_code = fuzzy.FuzzyText()
reporting_start_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_end_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
reporting_fiscal_period = fuzzy.FuzzyInteger(1, 4)
is_quarter_format = False
publishable = False
number_of_errors = 0
number_of_warnings = 0
|
Add factory for Submission modelimport factory
from factory import fuzzy
from datetime import date, datetime, timezone
from dataactcore.models import jobModels
class SubmissionFactory(factory.Factory):
class Meta:
model = jobModels.Submission
submission_id = None
datetime_utc = fuzzy.FuzzyDateTime(
datetime(2010, 1, 1, tzinfo=timezone.utc))
user_id = fuzzy.FuzzyInteger(1, 9999)
cgac_code = fuzzy.FuzzyText()
reporting_start_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_end_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
reporting_fiscal_period = fuzzy.FuzzyInteger(1, 4)
is_quarter_format = False
publishable = False
number_of_errors = 0
number_of_warnings = 0
|
<commit_before><commit_msg>Add factory for Submission model<commit_after>import factory
from factory import fuzzy
from datetime import date, datetime, timezone
from dataactcore.models import jobModels
class SubmissionFactory(factory.Factory):
class Meta:
model = jobModels.Submission
submission_id = None
datetime_utc = fuzzy.FuzzyDateTime(
datetime(2010, 1, 1, tzinfo=timezone.utc))
user_id = fuzzy.FuzzyInteger(1, 9999)
cgac_code = fuzzy.FuzzyText()
reporting_start_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_end_date = fuzzy.FuzzyDate(date(2010, 1, 1))
reporting_fiscal_year = fuzzy.FuzzyInteger(2010, 2040)
reporting_fiscal_period = fuzzy.FuzzyInteger(1, 4)
is_quarter_format = False
publishable = False
number_of_errors = 0
number_of_warnings = 0
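A short usage sketch (factory_boy's default build strategy, which instantiates the model without touching a database); the override values below are arbitrary examples:
submission = SubmissionFactory()  # unsaved jobModels.Submission with fuzzy field values
custom = SubmissionFactory(cgac_code='097', number_of_errors=3)  # keyword arguments override the fuzzy defaults
batch = SubmissionFactory.build_batch(5)  # five independent unsaved instances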
|
|
ffb1a660401d12eccb31787becc9c7ca45edaedc
|
algorithms/distances/euclidean_distance.py
|
algorithms/distances/euclidean_distance.py
|
from math import sqrt
from .distance_abstract import DistanceAbstract
class EuclideanDistance(DistanceAbstract):
def measure(cls, list1, list2):
distance = 0
min_len = min(len(list1), len(list2))
for i in range(0, min_len):
distance += sqrt(abs(list1[i]**2 - list2[i]**2))
return distance
|
Add support for Euclidean distance
|
Add support for Euclidean distance
|
Python
|
mit
|
theowni/Keyboard-Typing-User-Recognition
|
Add support for Euclidean distance
|
from math import sqrt
from .distance_abstract import DistanceAbstract
class EuclideanDistance(DistanceAbstract):
def measure(cls, list1, list2):
distance = 0
min_len = min(len(list1), len(list2))
for i in range(0, min_len):
distance += sqrt(abs(list1[i]**2 - list2[i]**2))
return distance
|
<commit_before><commit_msg>Add support for Euclidean distance<commit_after>
|
from math import sqrt
from .distance_abstract import DistanceAbstract
class EuclideanDistance(DistanceAbstract):
def measure(cls, list1, list2):
distance = 0
min_len = min(len(list1), len(list2))
for i in range(0, min_len):
distance += sqrt(abs(list1[i]**2 - list2[i]**2))
return distance
|
Add support for Euclidean distancefrom math import sqrt
from .distance_abstract import DistanceAbstract
class EuclideanDistance(DistanceAbstract):
def measure(cls, list1, list2):
distance = 0
min_len = min(len(list1), len(list2))
for i in range(0, min_len):
distance += sqrt(abs(list1[i]**2 - list2[i]**2))
return distance
|
<commit_before><commit_msg>Add support for Euclidean distance<commit_after>from math import sqrt
from .distance_abstract import DistanceAbstract
class EuclideanDistance(DistanceAbstract):
def measure(cls, list1, list2):
distance = 0
min_len = min(len(list1), len(list2))
for i in range(0, min_len):
distance += sqrt(abs(list1[i]**2 - list2[i]**2))
return distance
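For comparison, the textbook Euclidean distance is the square root of the summed squared differences, which differs from the per-component formula used above; a minimal reference sketch:
from math import sqrt
a, b = [1.0, 2.0, 3.0], [4.0, 6.0, 3.0]
d = sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))  # differences 3, 4, 0 -> d == 5.0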
|
|
ef7406a0ba6d87311c55bd0422bb2a411a51043a
|
vectorExtractorWithTuples.py
|
vectorExtractorWithTuples.py
|
import string
import sklearn
import numpy as np
from titleParse import *
import os
from sklearn import svm
import re
from featureHelpers import *
#testStringList = getTitles("test_data" + os.sep + "merged.txt")
#testStringList = getTitles("test.txt")
"""This is a function designed to extract an attribute vector out of the text of
a Craigslist posting. These attribute vectors will be fed to the SciKit Learn
module to determine the quality of the posting itself."""
clf = svm.SVC()
def extractVectorsFromListOfPosts(postList):
def extractVectorFromPost(postText):
upperCaseText = string.upper(postText)
count = len(postText)
whiteCount, letterCount, symbolCount, lowerCaseCount = 0, 0 ,0, 0
for i in xrange(count):
if postText[i] in string.whitespace: whiteCount += 1
elif postText[i] in string.ascii_letters:
letterCount += 1
lowerCaseCount += (1 - (upperCaseText[i] == postText[i]))
else: symbolCount += 1
#Python boolean arithmetic casts True to 1 and False to 0.
#If a char was lowercase, the count will increase
upperCaseRatio = 1 - float(lowerCaseCount)/letterCount
symbolRatio = float(symbolCount)/count
whiteRatio = float(whiteCount)/count
return [upperCaseRatio, symbolRatio, whiteRatio,count]
result = np.array(map(extractVectorFromPost,postList))
#print result
np.set_printoptions(precision=3)
np.savetxt('long_run.txt',result)
return result
def writeFile(filename, contents, mode="wt"):
"""This is a function taken from the 15-112 website. It writes
the string contents to the path defined by the string filename"""
# wt stands for "write text"
fout = None
try:
fout = open(filename, mode)
fout.write(contents)
finally:
if (fout != None): fout.close()
return True
def predictScoreForArrayOfVectors(vec_arr):
for vec in vec_arr:
print clf.predict(vec)
return
def getLearningModelFromArray(data_array, scores):
clf.fit(data_array,np.array(scores))
return True
(scores,titles) = getTitles('output3.txt')
vectors = extractVectorsFromListOfPosts(titles)
getLearningModelFromArray(vectors,scores)
|
Revert "Got rid of tuples"
|
Revert "Got rid of tuples"
This reverts commit 958ab35bcf408502f1347db5720241c489de8260.
|
Python
|
apache-2.0
|
bharadwajramachandran/TartanRoof
|
Revert "Got rid of tuples"
This reverts commit 958ab35bcf408502f1347db5720241c489de8260.
|
import string
import sklearn
import numpy as np
from titleParse import *
import os
from sklearn import svm
import re
from featureHelpers import *
#testStringList = getTitles("test_data" + os.sep + "merged.txt")
#testStringList = getTitles("test.txt")
"""This is a function designed to extract an attribute vector out of the text of
a Craigslist posting. These attribute vectors will be fed to the SciKit Learn
module to determine the quality of the posting itself."""
clf = svm.SVC()
def extractVectorsFromListOfPosts(postList):
def extractVectorFromPost(postText):
upperCaseText = string.upper(postText)
count = len(postText)
whiteCount, letterCount, symbolCount, lowerCaseCount = 0, 0 ,0, 0
for i in xrange(count):
if postText[i] in string.whitespace: whiteCount += 1
elif postText[i] in string.ascii_letters:
letterCount += 1
lowerCaseCount += (1 - (upperCaseText[i] == postText[i]))
else: symbolCount += 1
#Python boolean arithmetic casts True to 1 and False to 0.
#If a char was lowercase, the count will increase
upperCaseRatio = 1 - float(lowerCaseCount)/letterCount
symbolRatio = float(symbolCount)/count
whiteRatio = float(whiteCount)/count
return [upperCaseRatio, symbolRatio, whiteRatio,count]
result = np.array(map(extractVectorFromPost,postList))
#print result
np.set_printoptions(precision=3)
np.savetxt('long_run.txt',result)
return result
def writeFile(filename, contents, mode="wt"):
"""This is a function taken from the 15-112 website. It writes
the string contents to the path defined by the string filename"""
# wt stands for "write text"
fout = None
try:
fout = open(filename, mode)
fout.write(contents)
finally:
if (fout != None): fout.close()
return True
def predictScoreForArrayOfVectors(vec_arr):
for vec in vec_arr:
print clf.predict(vec)
return
def getLearningModelFromArray(data_array, scores):
clf.fit(data_array,np.array(scores))
return True
(scores,titles) = getTitles('output3.txt')
vectors = extractVectorsFromListOfPosts(titles)
getLearningModelFromArray(vectors,scores)
|
<commit_before><commit_msg>Revert "Got rid of tuples"
This reverts commit 958ab35bcf408502f1347db5720241c489de8260.<commit_after>
|
import string
import sklearn
import numpy as np
from titleParse import *
import os
from sklearn import svm
import re
from featureHelpers import *
#testStringList = getTitles("test_data" + os.sep + "merged.txt")
#testStringList = getTitles("test.txt")
"""This is a function designed to extract an attribute vector out of the text of
a Craigslist posting. These attribute vectors will be fed to the SciKit Learn
module to determine the quality of the posting itself."""
clf = svm.SVC()
def extractVectorsFromListOfPosts(postList):
def extractVectorFromPost(postText):
upperCaseText = string.upper(postText)
count = len(postText)
whiteCount, letterCount, symbolCount, lowerCaseCount = 0, 0 ,0, 0
for i in xrange(count):
if postText[i] in string.whitespace: whiteCount += 1
elif postText[i] in string.ascii_letters:
letterCount += 1
lowerCaseCount += (1 - (upperCaseText[i] == postText[i]))
else: symbolCount += 1
#Python boolean arithmetic casts True to 1 and False to 0.
#If a char was lowercase, the count will increase
upperCaseRatio = 1 - float(lowerCaseCount)/letterCount
symbolRatio = float(symbolCount)/count
whiteRatio = float(whiteCount)/count
return [upperCaseRatio, symbolRatio, whiteRatio,count]
result = np.array(map(extractVectorFromPost,postList))
#print result
np.set_printoptions(precision=3)
np.savetxt('long_run.txt',result)
return result
def writeFile(filename, contents, mode="wt"):
"""This is a function taken from the 15-112 website. It writes
the string contents to the path defined by the string filename"""
# wt stands for "write text"
fout = None
try:
fout = open(filename, mode)
fout.write(contents)
finally:
if (fout != None): fout.close()
return True
def predictScoreForArrayOfVectors(vec_arr):
for vec in vec_arr:
print clf.predict(vec)
return
def getLearningModelFromArray(data_array, scores):
clf.fit(data_array,np.array(scores))
return True
(scores,titles) = getTitles('output3.txt')
vectors = extractVectorsFromListOfPosts(titles)
getLearningModelFromArray(vectors,scores)
|
Revert "Got rid of tuples"
This reverts commit 958ab35bcf408502f1347db5720241c489de8260.import string
import sklearn
import numpy as np
from titleParse import *
import os
from sklearn import svm
import re
from featureHelpers import *
#testStringList = getTitles("test_data" + os.sep + "merged.txt")
#testStringList = getTitles("test.txt")
"""This is a function designed to extract an attribute vector out of the text of
a Craigslist posting. These attribute vectors will be fed to the SciKit Learn
module to determine the quality of the posting itself."""
clf = svm.SVC()
def extractVectorsFromListOfPosts(postList):
def extractVectorFromPost(postText):
upperCaseText = string.upper(postText)
count = len(postText)
whiteCount, letterCount, symbolCount, lowerCaseCount = 0, 0 ,0, 0
for i in xrange(count):
if postText[i] in string.whitespace: whiteCount += 1
elif postText[i] in string.ascii_letters:
letterCount += 1
lowerCaseCount += (1 - (upperCaseText[i] == postText[i]))
else: symbolCount += 1
#Python boolean arithmetic casts True to 1 and False to 0.
#If a char was lowercase, the count will increase
upperCaseRatio = 1 - float(lowerCaseCount)/letterCount
symbolRatio = float(symbolCount)/count
whiteRatio = float(whiteCount)/count
return [upperCaseRatio, symbolRatio, whiteRatio,count]
result = np.array(map(extractVectorFromPost,postList))
#print result
np.set_printoptions(precision=3)
np.savetxt('long_run.txt',result)
return result
def writeFile(filename, contents, mode="wt"):
"""This is a function taken from the 15-112 website. It writes
the string contents to the path defined by the string filename"""
# wt stands for "write text"
fout = None
try:
fout = open(filename, mode)
fout.write(contents)
finally:
if (fout != None): fout.close()
return True
def predictScoreForArrayOfVectors(vec_arr):
for vec in vec_arr:
print clf.predict(vec)
return
def getLearningModelFromArray(data_array, scores):
clf.fit(data_array,np.array(scores))
return True
(scores,titles) = getTitles('output3.txt')
vectors = extractVectorsFromListOfPosts(titles)
getLearningModelFromArray(vectors,scores)
|
<commit_before><commit_msg>Revert "Got rid of tuples"
This reverts commit 958ab35bcf408502f1347db5720241c489de8260.<commit_after>import string
import sklearn
import numpy as np
from titleParse import *
import os
from sklearn import svm
import re
from featureHelpers import *
#testStringList = getTitles("test_data" + os.sep + "merged.txt")
#testStringList = getTitles("test.txt")
"""This is a function designed to extract an attribute vector out of the text of
a Craigslist posting. These attribute vectors will be fed to the SciKit Learn
module to determine the quality of the posting itself."""
clf = svm.SVC()
def extractVectorsFromListOfPosts(postList):
def extractVectorFromPost(postText):
upperCaseText = string.upper(postText)
count = len(postText)
whiteCount, letterCount, symbolCount, lowerCaseCount = 0, 0 ,0, 0
for i in xrange(count):
if postText[i] in string.whitespace: whiteCount += 1
elif postText[i] in string.ascii_letters:
letterCount += 1
lowerCaseCount += (1 - (upperCaseText[i] == postText[i]))
else: symbolCount += 1
#Python boolean arithmetic casts True to 1 and False to 0.
#If a char was lowercase, the count will increase
upperCaseRatio = 1 - float(lowerCaseCount)/letterCount
symbolRatio = float(symbolCount)/count
whiteRatio = float(whiteCount)/count
return [upperCaseRatio, symbolRatio, whiteRatio,count]
result = np.array(map(extractVectorFromPost,postList))
#print result
np.set_printoptions(precision=3)
np.savetxt('long_run.txt',result)
return result
def writeFile(filename, contents, mode="wt"):
"""This is a function taken from the 15-112 website. It writes
the string contents to the path defined by the string filename"""
# wt stands for "write text"
fout = None
try:
fout = open(filename, mode)
fout.write(contents)
finally:
if (fout != None): fout.close()
return True
def predictScoreForArrayOfVectors(vec_arr):
for vec in vec_arr:
print clf.predict(vec)
return
def getLearningModelFromArray(data_array, scores):
clf.fit(data_array,np.array(scores))
return True
(scores,titles) = getTitles('output3.txt')
vectors = extractVectorsFromListOfPosts(titles)
getLearningModelFromArray(vectors,scores)
|
|
09a2975b8940a2726620ffff95950ecdad3ca884
|
pykmer/codec16.py
|
pykmer/codec16.py
|
def encode(x):
"encode an integer using a 15-bit+continuation-bit encodeing"
r = []
while True:
r.append(x & 32767)
x >>= 15
if x == 0:
break
r = r[::-1]
n = len(r) - 1
i = 0
while i < n:
r[i] |= 32768
i += 1
return r
def encodeInto(x, r):
"encode an integer using a 7-bit+continuation-bit encodeing into an existing list"
n = 0
y = x
while True:
n += 1
y >>= 15
r.append(0)
if y == 0:
break
v = n
i = -1
m = 0
while n > 0:
r[i] = (x & 32767) | m
x >>= 15
i -= 1
m = 32768
n -= 1
def decode(itr):
"dencode an integer from a 7-bit+continuation-bit encodeing"
r = 0
x = itr.next()
r = (x & 32767)
while x & 32768:
x = itr.next()
r = (r << 15) | (x & 32767)
return r
|
Add a 15/1 bit codec.
|
Add a 15/1 bit codec.
|
Python
|
apache-2.0
|
drtconway/pykmer
|
Add a 15/1 bit codec.
|
def encode(x):
"encode an integer using a 15-bit+continuation-bit encodeing"
r = []
while True:
r.append(x & 32767)
x >>= 15
if x == 0:
break
r = r[::-1]
n = len(r) - 1
i = 0
while i < n:
r[i] |= 32768
i += 1
return r
def encodeInto(x, r):
"encode an integer using a 7-bit+continuation-bit encodeing into an existing list"
n = 0
y = x
while True:
n += 1
y >>= 15
r.append(0)
if y == 0:
break
v = n
i = -1
m = 0
while n > 0:
r[i] = (x & 32767) | m
x >>= 15
i -= 1
m = 32768
n -= 1
def decode(itr):
"dencode an integer from a 7-bit+continuation-bit encodeing"
r = 0
x = itr.next()
r = (x & 32767)
while x & 32768:
x = itr.next()
r = (r << 15) | (x & 32767)
return r
|
<commit_before><commit_msg>Add a 15/1 bit codec.<commit_after>
|
def encode(x):
"encode an integer using a 15-bit+continuation-bit encodeing"
r = []
while True:
r.append(x & 32767)
x >>= 15
if x == 0:
break
r = r[::-1]
n = len(r) - 1
i = 0
while i < n:
r[i] |= 32768
i += 1
return r
def encodeInto(x, r):
"encode an integer using a 7-bit+continuation-bit encodeing into an existing list"
n = 0
y = x
while True:
n += 1
y >>= 15
r.append(0)
if y == 0:
break
v = n
i = -1
m = 0
while n > 0:
r[i] = (x & 32767) | m
x >>= 15
i -= 1
m = 32768
n -= 1
def decode(itr):
"dencode an integer from a 7-bit+continuation-bit encodeing"
r = 0
x = itr.next()
r = (x & 32767)
while x & 32768:
x = itr.next()
r = (r << 15) | (x & 32767)
return r
|
Add a 15/1 bit codec.
def encode(x):
"encode an integer using a 15-bit+continuation-bit encodeing"
r = []
while True:
r.append(x & 32767)
x >>= 15
if x == 0:
break
r = r[::-1]
n = len(r) - 1
i = 0
while i < n:
r[i] |= 32768
i += 1
return r
def encodeInto(x, r):
"encode an integer using a 7-bit+continuation-bit encodeing into an existing list"
n = 0
y = x
while True:
n += 1
y >>= 15
r.append(0)
if y == 0:
break
v = n
i = -1
m = 0
while n > 0:
r[i] = (x & 32767) | m
x >>= 15
i -= 1
m = 32768
n -= 1
def decode(itr):
"dencode an integer from a 7-bit+continuation-bit encodeing"
r = 0
x = itr.next()
r = (x & 32767)
while x & 32768:
x = itr.next()
r = (r << 15) | (x & 32767)
return r
|
<commit_before><commit_msg>Add a 15/1 bit codec.<commit_after>
def encode(x):
"encode an integer using a 15-bit+continuation-bit encodeing"
r = []
while True:
r.append(x & 32767)
x >>= 15
if x == 0:
break
r = r[::-1]
n = len(r) - 1
i = 0
while i < n:
r[i] |= 32768
i += 1
return r
def encodeInto(x, r):
"encode an integer using a 7-bit+continuation-bit encodeing into an existing list"
n = 0
y = x
while True:
n += 1
y >>= 15
r.append(0)
if y == 0:
break
v = n
i = -1
m = 0
while n > 0:
r[i] = (x & 32767) | m
x >>= 15
i -= 1
m = 32768
n -= 1
def decode(itr):
"dencode an integer from a 7-bit+continuation-bit encodeing"
r = 0
x = itr.next()
r = (x & 32767)
while x & 32768:
x = itr.next()
r = (r << 15) | (x & 32767)
return r
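A hypothetical round-trip check, written against the Python 2 style used above (decode() calls itr.next(), so a plain list iterator works):
value = 1234567
words = encode(value)
assert decode(iter(words)) == value
buf = []
encodeInto(value, buf)
assert buf == words  # encodeInto() produces the same word sequence as encode()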
|
|
a834d778e4ec72beb26ebc458bfc3975217fcf7b
|
scripts/mc_rtp_launch_record.py
|
scripts/mc_rtp_launch_record.py
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 The HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
from hera_mc.rtp import RTPLaunchRecord
ap = mc.get_mc_argument_parser()
ap.description = """Add or update the RTP Launch Record for an observation."""
ap.add_argument(
"files",
metavar="files",
type=str,
nargs="*",
default=[],
help="*.uvh5 files to process",
)
args = ap.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data is uvh5
uv = UVData()
uv.read(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale="utc", format="jd")
int_jd = int(np.floor(starttime.jd))
obsid = int(np.floor(starttime.gps))
obs_tag = uv.extra_keywords["tag"]
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) == 0:
print(f"observation {obsid} not in M&C, skipping")
continue
query = mc.query(RTPLaunchRecord).filter(RTPLaunchRecord.obsid == obsid)
result = query.all()
if len(result) == 0:
# add a new launch record
mc.add_rtp_launch_record(obsid, int_jd, obs_tag)
else:
# update existing record
t0 = Time.now()
mc.update_rtp_launch_record(obsid, t0)
session.commit()
|
Add script for adding/updating RTP launch records
|
Add script for adding/updating RTP launch records
|
Python
|
bsd-2-clause
|
HERA-Team/hera_mc,HERA-Team/hera_mc
|
Add script for adding/updating RTP launch records
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 The HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
from hera_mc.rtp import RTPLaunchRecord
ap = mc.get_mc_argument_parser()
ap.description = """Add or update the RTP Launch Record for an observation."""
ap.add_argument(
"files",
metavar="files",
type=str,
nargs="*",
default=[],
help="*.uvh5 files to process",
)
args = ap.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data is uvh5
uv = UVData()
uv.read(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale="utc", format="jd")
int_jd = int(np.floor(starttime.jd))
obsid = int(np.floor(starttime.gps))
obs_tag = uv.extra_keywords["tag"]
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) == 0:
print(f"observation {obsid} not in M&C, skipping")
continue
query = mc.query(RTPLaunchRecord).filter(RTPLaunchRecord.obsid == obsid)
result = query.all()
if len(result) == 0:
# add a new launch record
mc.add_rtp_launch_record(obsid, int_jd, obs_tag)
else:
# update existing record
t0 = Time.now()
mc.update_rtp_launch_record(obsid, t0)
session.commit()
|
<commit_before><commit_msg>Add script for adding/updating RTP launch records<commit_after>
|
#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 The HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
from hera_mc.rtp import RTPLaunchRecord
ap = mc.get_mc_argument_parser()
ap.description = """Add or update the RTP Launch Record for an observation."""
ap.add_argument(
"files",
metavar="files",
type=str,
nargs="*",
default=[],
help="*.uvh5 files to process",
)
args = ap.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data is uvh5
uv = UVData()
uv.read(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale="utc", format="jd")
int_jd = int(np.floor(starttime.jd))
obsid = int(np.floor(starttime.gps))
obs_tag = uv.extra_keywords["tag"]
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) == 0:
print(f"observation {obsid} not in M&C, skipping")
continue
query = mc.query(RTPLaunchRecord).filter(RTPLaunchRecord.obsid == obsid)
result = query.all()
if len(result) == 0:
# add a new launch record
mc.add_rtp_launch_record(obsid, int_jd, obs_tag)
else:
# update existing record
t0 = Time.now()
mc.update_rtp_launch_record(obsid, t0)
session.commit()
|
Add script for adding/updating RTP launch records#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 The HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
from hera_mc.rtp import RTPLaunchRecord
ap = mc.get_mc_argument_parser()
ap.description = """Add or update the RTP Launch Record for an observation."""
ap.add_argument(
"files",
metavar="files",
type=str,
nargs="*",
default=[],
help="*.uvh5 files to process",
)
args = ap.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data is uvh5
uv = UVData()
uv.read(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale="utc", format="jd")
int_jd = int(np.floor(starttime.jd))
obsid = int(np.floor(starttime.gps))
obs_tag = uv.extra_keywords["tag"]
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) == 0:
print(f"observation {obsid} not in M&C, skipping")
continue
query = mc.query(RTPLaunchRecord).filter(RTPLaunchRecord.obsid == obsid)
result = query.all()
if len(result) == 0:
# add a new launch record
mc.add_rtp_launch_record(obsid, int_jd, obs_tag)
else:
# update existing record
t0 = Time.now()
mc.update_rtp_launch_record(obsid, t0)
session.commit()
|
<commit_before><commit_msg>Add script for adding/updating RTP launch records<commit_after>#!/usr/bin/env python
# -*- mode: python; coding: utf-8 -*-
# Copyright 2021 The HERA Collaboration
# Licensed under the 2-clause BSD license.
import numpy as np
from astropy.time import Time
from pyuvdata import UVData
from hera_mc import mc
from hera_mc.rtp import RTPLaunchRecord
ap = mc.get_mc_argument_parser()
ap.description = """Add or update the RTP Launch Record for an observation."""
ap.add_argument(
"files",
metavar="files",
type=str,
nargs="*",
default=[],
help="*.uvh5 files to process",
)
args = ap.parse_args()
db = mc.connect_to_mc_db(args)
for uvfile in args.files:
# assume our data is uvh5
uv = UVData()
uv.read(uvfile, read_data=False)
times = np.unique(uv.time_array)
starttime = Time(times[0], scale="utc", format="jd")
int_jd = int(np.floor(starttime.jd))
obsid = int(np.floor(starttime.gps))
obs_tag = uv.extra_keywords["tag"]
with db.sessionmaker() as session:
obs = session.get_obs(obsid)
if len(obs) == 0:
print(f"observation {obsid} not in M&C, skipping")
continue
query = mc.query(RTPLaunchRecord).filter(RTPLaunchRecord.obsid == obsid)
result = query.all()
if len(result) == 0:
# add a new launch record
mc.add_rtp_launch_record(obsid, int_jd, obs_tag)
else:
# update existing record
t0 = Time.now()
mc.update_rtp_launch_record(obsid, t0)
session.commit()
|
|
61bcf2e6c47e0ade839d2712edc0e499d17bb552
|
tests/helpers.py
|
tests/helpers.py
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Helpers for the python-quilt test suite """
import unittest
class QuiltTest(unittest.TestCase):
""" Base class for all TestCases """
def assert_same_content(self, expected_file, actual_file):
expected = None
with open(expected_file, "r") as f:
expected = f.read()
with open(actual_fiel, "r") as f:
actual = f.read()
@classmethod
def suite(cls):
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(cls))
return suite
@classmethod
def run_tests(cls):
runner = unittest.TextTestRunner()
runner.run(cls.suite())
|
Add helper module for tests
|
Add helper module for tests
Currently the module contains a base class for all quilt tests.
|
Python
|
mit
|
vadmium/python-quilt,bjoernricks/python-quilt
|
Add helper module for tests
Currently the module contains a base class for all quilt tests.
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Helpers for the python-quilt test suite """
import unittest
class QuiltTest(unittest.TestCase):
""" Base class for all TestCases """
def assert_same_content(self, expected_file, actual_file):
expected = None
with open(expected_file, "r") as f:
expected = f.read()
with open(actual_fiel, "r") as f:
actual = f.read()
@classmethod
def suite(cls):
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(cls))
return suite
@classmethod
def run_tests(cls):
runner = unittest.TextTestRunner()
runner.run(cls.suite())
|
<commit_before><commit_msg>Add helper module for tests
Currently the module contains a base class for all quilt tests.<commit_after>
|
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Helpers for the python-quilt test suite """
import unittest
class QuiltTest(unittest.TestCase):
""" Base class for all TestCases """
def assert_same_content(self, expected_file, actual_file):
expected = None
with open(expected_file, "r") as f:
expected = f.read()
with open(actual_fiel, "r") as f:
actual = f.read()
@classmethod
def suite(cls):
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(cls))
return suite
@classmethod
def run_tests(cls):
runner = unittest.TextTestRunner()
runner.run(cls.suite())
|
Add helper module for tests
Currently the module contains a base class for all quilt tests.# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Helpers for the python-quilt test suite """
import unittest
class QuiltTest(unittest.TestCase):
""" Base class for all TestCases """
def assert_same_content(self, expected_file, actual_file):
expected = None
with open(expected_file, "r") as f:
expected = f.read()
with open(actual_fiel, "r") as f:
actual = f.read()
@classmethod
def suite(cls):
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(cls))
return suite
@classmethod
def run_tests(cls):
runner = unittest.TextTestRunner()
runner.run(cls.suite())
|
<commit_before><commit_msg>Add helper module for tests
Currently the module contains a base class for all quilt tests.<commit_after># vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
""" Helpers for the python-quilt test suite """
import unittest
class QuiltTest(unittest.TestCase):
""" Base class for all TestCases """
def assert_same_content(self, expected_file, actual_file):
expected = None
with open(expected_file, "r") as f:
expected = f.read()
with open(actual_fiel, "r") as f:
actual = f.read()
@classmethod
def suite(cls):
suite = unittest.TestSuite()
suite.addTest(unittest.defaultTestLoader.loadTestsFromTestCase(cls))
return suite
@classmethod
def run_tests(cls):
runner = unittest.TextTestRunner()
runner.run(cls.suite())
|
|
0774aa1c1bf64c39d3f456c6e85fc85403219f7f
|
examples/multiple_windows.py
|
examples/multiple_windows.py
|
import webview
import threading
"""
This example demonstrates how to create and manage multiple windows
"""
def create_new_window():
# Create new window and store its uid
child_window = webview.create_window("Window #2", width=800, height=400)
# Load content into both windows
webview.load_html('<h1>Master Window</h1>')
webview.load_html('<h1>Child Window</h1>', uid=child_window)
if __name__ == '__main__':
t = threading.Thread(target=create_new_window)
t.start()
# Master window
webview.create_window("Window #1", width=800, height=600)
|
Add a simple multiple-window example
|
[All] Add a simple multiple-window example
|
Python
|
bsd-3-clause
|
shivaprsdv/pywebview,r0x0r/pywebview,r0x0r/pywebview,r0x0r/pywebview,shivaprsdv/pywebview,r0x0r/pywebview,shivaprsdv/pywebview,shivaprsdv/pywebview,r0x0r/pywebview
|
[All] Add a simple multiple-window example
|
import webview
import threading
"""
This example demonstrates how to create and manage multiple windows
"""
def create_new_window():
# Create new window and store its uid
child_window = webview.create_window("Window #2", width=800, height=400)
# Load content into both windows
webview.load_html('<h1>Master Window</h1>')
webview.load_html('<h1>Child Window</h1>', uid=child_window)
if __name__ == '__main__':
t = threading.Thread(target=create_new_window)
t.start()
# Master window
webview.create_window("Window #1", width=800, height=600)
|
<commit_before><commit_msg>[All] Add a simple multiple-window example<commit_after>
|
import webview
import threading
"""
This example demonstrates how to create and manage multiple windows
"""
def create_new_window():
# Create new window and store its uid
child_window = webview.create_window("Window #2", width=800, height=400)
# Load content into both windows
webview.load_html('<h1>Master Window</h1>')
webview.load_html('<h1>Child Window</h1>', uid=child_window)
if __name__ == '__main__':
t = threading.Thread(target=create_new_window)
t.start()
# Master window
webview.create_window("Window #1", width=800, height=600)
|
[All] Add a simple multiple-window exampleimport webview
import threading
"""
This example demonstrates how to create and manage multiple windows
"""
def create_new_window():
# Create new window and store its uid
child_window = webview.create_window("Window #2", width=800, height=400)
# Load content into both windows
webview.load_html('<h1>Master Window</h1>')
webview.load_html('<h1>Child Window</h1>', uid=child_window)
if __name__ == '__main__':
t = threading.Thread(target=create_new_window)
t.start()
# Master window
webview.create_window("Window #1", width=800, height=600)
|
<commit_before><commit_msg>[All] Add a simple multiple-window example<commit_after>import webview
import threading
"""
This example demonstrates how to create and manage multiple windows
"""
def create_new_window():
# Create new window and store its uid
child_window = webview.create_window("Window #2", width=800, height=400)
# Load content into both windows
webview.load_html('<h1>Master Window</h1>')
webview.load_html('<h1>Child Window</h1>', uid=child_window)
if __name__ == '__main__':
t = threading.Thread(target=create_new_window)
t.start()
# Master window
webview.create_window("Window #1", width=800, height=600)
|
|
d58505e13edce70a34d4058dce4387d9ca43c911
|
bench/pact-suite/uts/uts_calc.py
|
bench/pact-suite/uts/uts_calc.py
|
# Estimate number of nodes for geometric uts with linear shape
b_0 = 4.0
gen_mx = 22
# level is from 0 onwards
def level_nodes(level):
branch_factors = [ b_0 * ( 1.0 - i / float(gen_mx)) for i in range(level + 1) ]
return reduce(lambda x, y: x*y, branch_factors)
print "b_0: %f gen_mx %d" % (b_0, gen_mx)
total = 0
for level in range(gen_mx+1):
this_level_count = level_nodes(level)
print "Level %d count: %d" % (level, this_level_count)
total += this_level_count
print "Total count: %d" % total
|
Add calculator for uts tree size
|
Add calculator for uts tree size
git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12008 dc4e9af1-7f46-4ead-bba6-71afc04862de
|
Python
|
apache-2.0
|
swift-lang/swift-t,blue42u/swift-t,JohnPJenkins/swift-t,JohnPJenkins/swift-t,blue42u/swift-t,blue42u/swift-t,swift-lang/swift-t,JohnPJenkins/swift-t,swift-lang/swift-t,swift-lang/swift-t,JohnPJenkins/swift-t,JohnPJenkins/swift-t,basheersubei/swift-t,basheersubei/swift-t,basheersubei/swift-t,swift-lang/swift-t,JohnPJenkins/swift-t,basheersubei/swift-t,blue42u/swift-t,blue42u/swift-t,swift-lang/swift-t,basheersubei/swift-t,basheersubei/swift-t,basheersubei/swift-t,JohnPJenkins/swift-t,blue42u/swift-t,blue42u/swift-t,swift-lang/swift-t
|
Add calculator for uts tree size
git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12008 dc4e9af1-7f46-4ead-bba6-71afc04862de
|
# Estimate number of nodes for geometric uts with linear shape
b_0 = 4.0
gen_mx = 22
# level is from 0 onwards
def level_nodes(level):
branch_factors = [ b_0 * ( 1.0 - i / float(gen_mx)) for i in range(level + 1) ]
return reduce(lambda x, y: x*y, branch_factors)
print "b_0: %f gen_mx %d" % (b_0, gen_mx)
total = 0
for level in range(gen_mx+1):
this_level_count = level_nodes(level)
print "Level %d count: %d" % (level, this_level_count)
total += this_level_count
print "Total count: %d" % total
|
<commit_before><commit_msg>Add calculator for uts tree size
git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12008 dc4e9af1-7f46-4ead-bba6-71afc04862de<commit_after>
|
# Estimate number of nodes for geometric uts with linear shape
b_0 = 4.0
gen_mx = 22
# level is from 0 onwards
def level_nodes(level):
branch_factors = [ b_0 * ( 1.0 - i / float(gen_mx)) for i in range(level + 1) ]
return reduce(lambda x, y: x*y, branch_factors)
print "b_0: %f gen_mx %d" % (b_0, gen_mx)
total = 0
for level in range(gen_mx+1):
this_level_count = level_nodes(level)
print "Level %d count: %d" % (level, this_level_count)
total += this_level_count
print "Total count: %d" % total
|
Add calculator for uts tree size
git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12008 dc4e9af1-7f46-4ead-bba6-71afc04862de
# Estimate number of nodes for geometric uts with linear shape
b_0 = 4.0
gen_mx = 22
# level is from 0 onwards
def level_nodes(level):
branch_factors = [ b_0 * ( 1.0 - i / float(gen_mx)) for i in range(level + 1) ]
return reduce(lambda x, y: x*y, branch_factors)
print "b_0: %f gen_mx %d" % (b_0, gen_mx)
total = 0
for level in range(gen_mx+1):
this_level_count = level_nodes(level)
print "Level %d count: %d" % (level, this_level_count)
total += this_level_count
print "Total count: %d" % total
|
<commit_before><commit_msg>Add calculator for uts tree size
git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12008 dc4e9af1-7f46-4ead-bba6-71afc04862de<commit_after>
# Estimate number of nodes for geometric uts with linear shape
b_0 = 4.0
gen_mx = 22
# level is from 0 onwards
def level_nodes(level):
branch_factors = [ b_0 * ( 1.0 - i / float(gen_mx)) for i in range(level + 1) ]
return reduce(lambda x, y: x*y, branch_factors)
print "b_0: %f gen_mx %d" % (b_0, gen_mx)
total = 0
for level in range(gen_mx+1):
this_level_count = level_nodes(level)
print "Level %d count: %d" % (level, this_level_count)
total += this_level_count
print "Total count: %d" % total
|
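A quick sanity check on the estimate above (a minimal Python 3 sketch, not part of uts_calc.py, which is written for Python 2): level_nodes(L) is simply the product of the per-level branching factors b_0 * (1 - i/gen_mx) for i = 0..L.

from functools import reduce

b_0, gen_mx = 4.0, 22  # same constants as the script above

def level_nodes(level):
    # product of branching factors from the root (level 0) down to this level
    factors = [b_0 * (1.0 - i / float(gen_mx)) for i in range(level + 1)]
    return reduce(lambda x, y: x * y, factors)

print(level_nodes(0))            # 4.0
print(round(level_nodes(1), 2))  # 4.0 * 4.0*(21/22) = 15.27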
|
d4443b05fbd48ca89d03942ef30bed1769948366
|
contrib_bots/bots/thesaurus/test_thesaurus.py
|
contrib_bots/bots/thesaurus/test_thesaurus.py
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestThesaurusBot(BotTestCase):
bot_name = "thesaurus"
def test_bot(self):
self.assert_bot_output(
{'content': "synonym good", 'type': "private", 'sender_email': "foo"},
"great, satisfying, exceptional, positive, acceptable"
)
self.assert_bot_output(
{'content': "synonym nice", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"cordial, kind, good, okay, fair"
)
self.assert_bot_output(
{'content': "synonym foo", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"bar, thud, X, baz, corge"
)
self.assert_bot_output(
{'content': "antonym dirty", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"ordered, sterile, spotless, moral, clean"
)
self.assert_bot_output(
{'content': "antonym bar", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"loss, whole, advantage, aid, failure"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
("To use this bot, start messages with either "
"@mention-bot synonym (to get the synonyms of a given word) "
"or @mention-bot antonym (to get the antonyms of a given word). "
"Phrases are not accepted so only use single words "
"to search. For example you could search '@mention-bot synonym hello' "
"or '@mention-bot antonym goodbye'."),
)
|
Add tests for thesaurus bot in contrib_bots.
|
testsuite: Add tests for thesaurus bot in contrib_bots.
Add test file 'test_thesaurus.py'.
|
Python
|
apache-2.0
|
jackrzhang/zulip,punchagan/zulip,eeshangarg/zulip,brockwhittaker/zulip,zulip/zulip,amanharitsh123/zulip,brainwane/zulip,hackerkid/zulip,mahim97/zulip,showell/zulip,amanharitsh123/zulip,timabbott/zulip,verma-varsha/zulip,shubhamdhama/zulip,vaidap/zulip,shubhamdhama/zulip,mahim97/zulip,zulip/zulip,mahim97/zulip,dhcrzf/zulip,timabbott/zulip,brainwane/zulip,rht/zulip,Galexrt/zulip,rishig/zulip,showell/zulip,eeshangarg/zulip,vabs22/zulip,verma-varsha/zulip,andersk/zulip,vabs22/zulip,vabs22/zulip,Galexrt/zulip,brainwane/zulip,jackrzhang/zulip,jackrzhang/zulip,dhcrzf/zulip,andersk/zulip,brainwane/zulip,verma-varsha/zulip,shubhamdhama/zulip,rht/zulip,rishig/zulip,verma-varsha/zulip,dhcrzf/zulip,shubhamdhama/zulip,vabs22/zulip,jackrzhang/zulip,kou/zulip,jrowan/zulip,shubhamdhama/zulip,eeshangarg/zulip,zulip/zulip,timabbott/zulip,punchagan/zulip,synicalsyntax/zulip,vaidap/zulip,amanharitsh123/zulip,vaidap/zulip,tommyip/zulip,synicalsyntax/zulip,shubhamdhama/zulip,rishig/zulip,brockwhittaker/zulip,rht/zulip,punchagan/zulip,zulip/zulip,kou/zulip,tommyip/zulip,vaidap/zulip,vaidap/zulip,mahim97/zulip,amanharitsh123/zulip,hackerkid/zulip,dhcrzf/zulip,kou/zulip,eeshangarg/zulip,kou/zulip,zulip/zulip,rht/zulip,hackerkid/zulip,rishig/zulip,punchagan/zulip,timabbott/zulip,punchagan/zulip,dhcrzf/zulip,andersk/zulip,kou/zulip,Galexrt/zulip,brainwane/zulip,tommyip/zulip,brockwhittaker/zulip,rht/zulip,brockwhittaker/zulip,vabs22/zulip,rht/zulip,hackerkid/zulip,amanharitsh123/zulip,synicalsyntax/zulip,rht/zulip,jrowan/zulip,vabs22/zulip,andersk/zulip,kou/zulip,timabbott/zulip,shubhamdhama/zulip,tommyip/zulip,hackerkid/zulip,tommyip/zulip,mahim97/zulip,andersk/zulip,zulip/zulip,andersk/zulip,zulip/zulip,jackrzhang/zulip,hackerkid/zulip,rishig/zulip,andersk/zulip,brockwhittaker/zulip,brainwane/zulip,eeshangarg/zulip,punchagan/zulip,brainwane/zulip,jrowan/zulip,synicalsyntax/zulip,showell/zulip,jrowan/zulip,synicalsyntax/zulip,brockwhittaker/zulip,dhcrzf/zulip,jrowan/zulip,jrowan/zulip,hackerkid/zulip,rishig/zulip,showell/zulip,mahim97/zulip,Galexrt/zulip,eeshangarg/zulip,eeshangarg/zulip,synicalsyntax/zulip,timabbott/zulip,amanharitsh123/zulip,punchagan/zulip,rishig/zulip,tommyip/zulip,jackrzhang/zulip,showell/zulip,verma-varsha/zulip,vaidap/zulip,timabbott/zulip,Galexrt/zulip,verma-varsha/zulip,tommyip/zulip,dhcrzf/zulip,Galexrt/zulip,showell/zulip,Galexrt/zulip,showell/zulip,kou/zulip,synicalsyntax/zulip,jackrzhang/zulip
|
testsuite: Add tests for thesaurus bot in contrib_bots.
Add test file 'test_thesaurus.py'.
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestThesaurusBot(BotTestCase):
bot_name = "thesaurus"
def test_bot(self):
self.assert_bot_output(
{'content': "synonym good", 'type': "private", 'sender_email': "foo"},
"great, satisfying, exceptional, positive, acceptable"
)
self.assert_bot_output(
{'content': "synonym nice", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"cordial, kind, good, okay, fair"
)
self.assert_bot_output(
{'content': "synonym foo", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"bar, thud, X, baz, corge"
)
self.assert_bot_output(
{'content': "antonym dirty", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"ordered, sterile, spotless, moral, clean"
)
self.assert_bot_output(
{'content': "antonym bar", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"loss, whole, advantage, aid, failure"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
("To use this bot, start messages with either "
"@mention-bot synonym (to get the synonyms of a given word) "
"or @mention-bot antonym (to get the antonyms of a given word). "
"Phrases are not accepted so only use single words "
"to search. For example you could search '@mention-bot synonym hello' "
"or '@mention-bot antonym goodbye'."),
)
|
<commit_before><commit_msg>testsuite: Add tests for thesaurus bot in contrib_bots.
Add test file 'test_thesaurus.py'.<commit_after>
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestThesaurusBot(BotTestCase):
bot_name = "thesaurus"
def test_bot(self):
self.assert_bot_output(
{'content': "synonym good", 'type': "private", 'sender_email': "foo"},
"great, satisfying, exceptional, positive, acceptable"
)
self.assert_bot_output(
{'content': "synonym nice", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"cordial, kind, good, okay, fair"
)
self.assert_bot_output(
{'content': "synonym foo", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"bar, thud, X, baz, corge"
)
self.assert_bot_output(
{'content': "antonym dirty", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"ordered, sterile, spotless, moral, clean"
)
self.assert_bot_output(
{'content': "antonym bar", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"loss, whole, advantage, aid, failure"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
("To use this bot, start messages with either "
"@mention-bot synonym (to get the synonyms of a given word) "
"or @mention-bot antonym (to get the antonyms of a given word). "
"Phrases are not accepted so only use single words "
"to search. For example you could search '@mention-bot synonym hello' "
"or '@mention-bot antonym goodbye'."),
)
|
testsuite: Add tests for thesaurus bot in contrib_bots.
Add test file 'test_thesaurus.py'.
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestThesaurusBot(BotTestCase):
bot_name = "thesaurus"
def test_bot(self):
self.assert_bot_output(
{'content': "synonym good", 'type': "private", 'sender_email': "foo"},
"great, satisfying, exceptional, positive, acceptable"
)
self.assert_bot_output(
{'content': "synonym nice", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"cordial, kind, good, okay, fair"
)
self.assert_bot_output(
{'content': "synonym foo", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"bar, thud, X, baz, corge"
)
self.assert_bot_output(
{'content': "antonym dirty", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"ordered, sterile, spotless, moral, clean"
)
self.assert_bot_output(
{'content': "antonym bar", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"loss, whole, advantage, aid, failure"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
("To use this bot, start messages with either "
"@mention-bot synonym (to get the synonyms of a given word) "
"or @mention-bot antonym (to get the antonyms of a given word). "
"Phrases are not accepted so only use single words "
"to search. For example you could search '@mention-bot synonym hello' "
"or '@mention-bot antonym goodbye'."),
)
|
<commit_before><commit_msg>testsuite: Add tests for thesaurus bot in contrib_bots.
Add test file 'test_thesaurus.py'.<commit_after>#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import print_function
import os
import sys
our_dir = os.path.dirname(os.path.abspath(__file__))
# For dev setups, we can find the API in the repo itself.
if os.path.exists(os.path.join(our_dir, '..')):
sys.path.insert(0, '..')
from bots_test_lib import BotTestCase
class TestThesaurusBot(BotTestCase):
bot_name = "thesaurus"
def test_bot(self):
self.assert_bot_output(
{'content': "synonym good", 'type': "private", 'sender_email': "foo"},
"great, satisfying, exceptional, positive, acceptable"
)
self.assert_bot_output(
{'content': "synonym nice", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"cordial, kind, good, okay, fair"
)
self.assert_bot_output(
{'content': "synonym foo", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"bar, thud, X, baz, corge"
)
self.assert_bot_output(
{'content': "antonym dirty", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"ordered, sterile, spotless, moral, clean"
)
self.assert_bot_output(
{'content': "antonym bar", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
"loss, whole, advantage, aid, failure"
)
self.assert_bot_output(
{'content': "", 'type': "stream", 'display_recipient': "foo", 'subject': "foo"},
("To use this bot, start messages with either "
"@mention-bot synonym (to get the synonyms of a given word) "
"or @mention-bot antonym (to get the antonyms of a given word). "
"Phrases are not accepted so only use single words "
"to search. For example you could search '@mention-bot synonym hello' "
"or '@mention-bot antonym goodbye'."),
)
|
|
25c215df5550e34de21bfde7416ed43286d70ef2
|
core/migrations/0049_providerconfiguration.py
|
core/migrations/0049_providerconfiguration.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_provider_configuration(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderConfiguration = apps.get_model("core", "ProviderConfiguration")
providers = Provider.objects.all()
for provider in providers:
configuration, created = ProviderConfiguration.objects.get_or_create(provider=provider)
# if created:
# print "New configuration: %s" % configuration
return
class Migration(migrations.Migration):
dependencies = [
('core', '0048_helplink'),
]
operations = [
migrations.CreateModel(
name='ProviderConfiguration',
fields=[
('provider', models.OneToOneField(related_name='configuration', primary_key=True, serialize=False, to='core.Provider')),
('timezone', models.CharField(default=b'Etc/UTC', max_length=64)),
],
options={
'db_table': 'provider_configuration',
},
),
migrations.RunPython(create_provider_configuration)
]
|
Add 0049 - ProviderConfiguration migration+RunPython
|
Add 0049 - ProviderConfiguration migration+RunPython
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
|
Add 0049 - ProviderConfiguration migration+RunPython
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_provider_configuration(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderConfiguration = apps.get_model("core", "ProviderConfiguration")
providers = Provider.objects.all()
for provider in providers:
configuration, created = ProviderConfiguration.objects.get_or_create(provider=provider)
# if created:
# print "New configuration: %s" % configuration
return
class Migration(migrations.Migration):
dependencies = [
('core', '0048_helplink'),
]
operations = [
migrations.CreateModel(
name='ProviderConfiguration',
fields=[
('provider', models.OneToOneField(related_name='configuration', primary_key=True, serialize=False, to='core.Provider')),
('timezone', models.CharField(default=b'Etc/UTC', max_length=64)),
],
options={
'db_table': 'provider_configuration',
},
),
migrations.RunPython(create_provider_configuration)
]
|
<commit_before><commit_msg>Add 0049 - ProviderConfiguration migration+RunPython<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_provider_configuration(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderConfiguration = apps.get_model("core", "ProviderConfiguration")
providers = Provider.objects.all()
for provider in providers:
configuration, created = ProviderConfiguration.objects.get_or_create(provider=provider)
# if created:
# print "New configuration: %s" % configuration
return
class Migration(migrations.Migration):
dependencies = [
('core', '0048_helplink'),
]
operations = [
migrations.CreateModel(
name='ProviderConfiguration',
fields=[
('provider', models.OneToOneField(related_name='configuration', primary_key=True, serialize=False, to='core.Provider')),
('timezone', models.CharField(default=b'Etc/UTC', max_length=64)),
],
options={
'db_table': 'provider_configuration',
},
),
migrations.RunPython(create_provider_configuration)
]
|
Add 0049 - ProviderConfiguration migration+RunPython
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_provider_configuration(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderConfiguration = apps.get_model("core", "ProviderConfiguration")
providers = Provider.objects.all()
for provider in providers:
configuration, created = ProviderConfiguration.objects.get_or_create(provider=provider)
# if created:
# print "New configuration: %s" % configuration
return
class Migration(migrations.Migration):
dependencies = [
('core', '0048_helplink'),
]
operations = [
migrations.CreateModel(
name='ProviderConfiguration',
fields=[
('provider', models.OneToOneField(related_name='configuration', primary_key=True, serialize=False, to='core.Provider')),
('timezone', models.CharField(default=b'Etc/UTC', max_length=64)),
],
options={
'db_table': 'provider_configuration',
},
),
migrations.RunPython(create_provider_configuration)
]
|
<commit_before><commit_msg>Add 0049 - ProviderConfiguration migration+RunPython<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def create_provider_configuration(apps, schema_editor):
Provider = apps.get_model("core", "Provider")
ProviderConfiguration = apps.get_model("core", "ProviderConfiguration")
providers = Provider.objects.all()
for provider in providers:
configuration, created = ProviderConfiguration.objects.get_or_create(provider=provider)
# if created:
# print "New configuration: %s" % configuration
return
class Migration(migrations.Migration):
dependencies = [
('core', '0048_helplink'),
]
operations = [
migrations.CreateModel(
name='ProviderConfiguration',
fields=[
('provider', models.OneToOneField(related_name='configuration', primary_key=True, serialize=False, to='core.Provider')),
('timezone', models.CharField(default=b'Etc/UTC', max_length=64)),
],
options={
'db_table': 'provider_configuration',
},
),
migrations.RunPython(create_provider_configuration)
]
|
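One detail worth noting about the migration above: migrations.RunPython is given only a forward function, so Django treats the data step as irreversible and will refuse to unapply the migration. If reversibility ever matters, a common pattern (a sketch, not part of the original file) is to pass a no-op reverse:

    migrations.RunPython(create_provider_configuration, migrations.RunPython.noop)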
|
387873ef2a953f89d7b98cade3a87b1339ff9ca6
|
scripts/external_test_allocation.py
|
scripts/external_test_allocation.py
|
#!/usr/bin/env python
from django import setup
setup()
from service.monitoring import get_allocation_result_for
from core.models import Identity, Instance, AtmosphereUser
from django.utils.timezone import timedelta, datetime
import pytz, sys
from dateutil.parser import parse
if len(sys.argv) < 3:
print "Invalid # of args"
print "Usage: %s <username> <start> <end>" % sys.argv[0]
print "All times assumed to be in utc"
sys.exit(1)
def utc_parse(date_str):
dt = parse(date_str)
return pytz.utc.localize(dt)
username=sys.argv[1]
start = utc_parse(sys.argv[2])
end = utc_parse(sys.argv[3])
print "Testing %s from %s - %s" % (username, start, end)
ident = Identity.objects.get(provider__id=4, created_by__username=username)
print get_allocation_result_for(ident.provider, ident.created_by, True, start, end)
|
Add a script to call for allocation result externally
|
Add a script to call for allocation result externally
|
Python
|
apache-2.0
|
CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend,CCI-MOC/GUI-Backend
|
Add a script to call for allocation result externally
|
#!/usr/bin/env python
from django import setup
setup()
from service.monitoring import get_allocation_result_for
from core.models import Identity, Instance, AtmosphereUser
from django.utils.timezone import timedelta, datetime
import pytz, sys
from dateutil.parser import parse
if len(sys.argv) < 3:
print "Invalid # of args"
print "Usage: %s <username> <start> <end>" % sys.argv[0]
print "All times assumed to be in utc"
sys.exit(1)
def utc_parse(date_str):
dt = parse(date_str)
return pytz.utc.localize(dt)
username=sys.argv[1]
start = utc_parse(sys.argv[2])
end = utc_parse(sys.argv[3])
print "Testing %s from %s - %s" % (username, start, end)
ident = Identity.objects.get(provider__id=4, created_by__username=username)
print get_allocation_result_for(ident.provider, ident.created_by, True, start, end)
|
<commit_before><commit_msg>Add a script to call for allocation result externally<commit_after>
|
#!/usr/bin/env python
from django import setup
setup()
from service.monitoring import get_allocation_result_for
from core.models import Identity, Instance, AtmosphereUser
from django.utils.timezone import timedelta, datetime
import pytz, sys
from dateutil.parser import parse
if len(sys.argv) < 3:
print "Invalid # of args"
print "Usage: %s <username> <start> <end>" % sys.argv[0]
print "All times assumed to be in utc"
sys.exit(1)
def utc_parse(date_str):
dt = parse(date_str)
return pytz.utc.localize(dt)
username=sys.argv[1]
start = utc_parse(sys.argv[2])
end = utc_parse(sys.argv[3])
print "Testing %s from %s - %s" % (username, start, end)
ident = Identity.objects.get(provider__id=4, created_by__username=username)
print get_allocation_result_for(ident.provider, ident.created_by, True, start, end)
|
Add a script to call for allocation result externally
#!/usr/bin/env python
from django import setup
setup()
from service.monitoring import get_allocation_result_for
from core.models import Identity, Instance, AtmosphereUser
from django.utils.timezone import timedelta, datetime
import pytz, sys
from dateutil.parser import parse
if len(sys.argv) < 3:
print "Invalid # of args"
print "Usage: %s <username> <start> <end>" % sys.argv[0]
print "All times assumed to be in utc"
sys.exit(1)
def utc_parse(date_str):
dt = parse(date_str)
return pytz.utc.localize(dt)
username=sys.argv[1]
start = utc_parse(sys.argv[2])
end = utc_parse(sys.argv[3])
print "Testing %s from %s - %s" % (username, start, end)
ident = Identity.objects.get(provider__id=4, created_by__username=username)
print get_allocation_result_for(ident.provider, ident.created_by, True, start, end)
|
<commit_before><commit_msg>Add a script to call for allocation result externally<commit_after>#!/usr/bin/env python
from django import setup
setup()
from service.monitoring import get_allocation_result_for
from core.models import Identity, Instance, AtmosphereUser
from django.utils.timezone import timedelta, datetime
import pytz, sys
from dateutil.parser import parse
if len(sys.argv) < 3:
print "Invalid # of args"
print "Usage: %s <username> <start> <end>" % sys.argv[0]
print "All times assumed to be in utc"
sys.exit(1)
def utc_parse(date_str):
dt = parse(date_str)
return pytz.utc.localize(dt)
username=sys.argv[1]
start = utc_parse(sys.argv[2])
end = utc_parse(sys.argv[3])
print "Testing %s from %s - %s" % (username, start, end)
ident = Identity.objects.get(provider__id=4, created_by__username=username)
print get_allocation_result_for(ident.provider, ident.created_by, True, start, end)
|
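A usage note on the script above. It expects three positional arguments, for example (hypothetical values):

    python external_test_allocation.py some_user 2016-01-01 2016-02-01

Since sys.argv[0] is the script name itself, three arguments mean len(sys.argv) == 4, so the guard "if len(sys.argv) < 3" still lets a two-argument call through and then fails on sys.argv[3]; checking "len(sys.argv) < 4" would match the usage message it prints.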
|
45d7a6ed3a68793585b24474753cec2a01fc6c03
|
Largest_Palindrome_Product.py
|
Largest_Palindrome_Product.py
|
# Find the largest palindrome made from the product of two n-digit numbers.
# Since the result could be very large, you should return the largest palindrome mod 1337.
# Example:
# Input: 2
# Output: 987
# Explanation: 99 x 91 = 9009, 9009 % 1337 = 987
# Note:
# The range of n is [1,8].
def largestPalindrome(n):
"""
:type n: int
:rtype: int
"""
number1 = ""
number2 = ""
for x in range(n):
number1 += "9"
number2 += "9"
number1 = int(number1)
number2 = int(number2)
palindrome = 0
for x in range(number1 + 1):
for i in range(number2 + 1):
product = x * i
if (str(product) == str(product)[::-1]) and product > palindrome:
palindrome = product
return palindrome % 1337
n = 2
print(largestPalindrome(n))
|
Solve Largest Palindrome Product with brute force (Time Limit Exceeded)
|
Solve Largest Palindrome Product with brute force (Time Limit Exceeded)
|
Python
|
mit
|
Kunal57/Python_Algorithms
|
Solve Largest Palindrome Product with brute force (Time Limit Exceeded)
|
# Find the largest palindrome made from the product of two n-digit numbers.
# Since the result could be very large, you should return the largest palindrome mod 1337.
# Example:
# Input: 2
# Output: 987
# Explanation: 99 x 91 = 9009, 9009 % 1337 = 987
# Note:
# The range of n is [1,8].
def largestPalindrome(n):
"""
:type n: int
:rtype: int
"""
number1 = ""
number2 = ""
for x in range(n):
number1 += "9"
number2 += "9"
number1 = int(number1)
number2 = int(number2)
palindrome = 0
for x in range(number1 + 1):
for i in range(number2 + 1):
product = x * i
if (str(product) == str(product)[::-1]) and product > palindrome:
palindrome = product
return palindrome % 1337
n = 2
print(largestPalindrome(n))
|
<commit_before><commit_msg>Solve Largest Palindrome Product with brute force (Time Limit Exceeded)<commit_after>
|
# Find the largest palindrome made from the product of two n-digit numbers.
# Since the result could be very large, you should return the largest palindrome mod 1337.
# Example:
# Input: 2
# Output: 987
# Explanation: 99 x 91 = 9009, 9009 % 1337 = 987
# Note:
# The range of n is [1,8].
def largestPalindrome(n):
"""
:type n: int
:rtype: int
"""
number1 = ""
number2 = ""
for x in range(n):
number1 += "9"
number2 += "9"
number1 = int(number1)
number2 = int(number2)
palindrome = 0
for x in range(number1 + 1):
for i in range(number2 + 1):
product = x * i
if (str(product) == str(product)[::-1]) and product > palindrome:
palindrome = product
return palindrome % 1337
n = 2
print(largestPalindrome(n))
|
Solve Largest Palindrome Product with brute force (Time Limit Exceeded)
# Find the largest palindrome made from the product of two n-digit numbers.
# Since the result could be very large, you should return the largest palindrome mod 1337.
# Example:
# Input: 2
# Output: 987
# Explanation: 99 x 91 = 9009, 9009 % 1337 = 987
# Note:
# The range of n is [1,8].
def largestPalindrome(n):
"""
:type n: int
:rtype: int
"""
number1 = ""
number2 = ""
for x in range(n):
number1 += "9"
number2 += "9"
number1 = int(number1)
number2 = int(number2)
palindrome = 0
for x in range(number1 + 1):
for i in range(number2 + 1):
product = x * i
if (str(product) == str(product)[::-1]) and product > palindrome:
palindrome = product
return palindrome % 1337
n = 2
print(largestPalindrome(n))
|
<commit_before><commit_msg>Solve Largest Palindrome Product with brute force (Time Limit Exceeded)<commit_after># Find the largest palindrome made from the product of two n-digit numbers.
# Since the result could be very large, you should return the largest palindrome mod 1337.
# Example:
# Input: 2
# Output: 987
# Explanation: 99 x 91 = 9009, 9009 % 1337 = 987
# Note:
# The range of n is [1,8].
def largestPalindrome(n):
"""
:type n: int
:rtype: int
"""
number1 = ""
number2 = ""
for x in range(n):
number1 += "9"
number2 += "9"
number1 = int(number1)
number2 = int(number2)
palindrome = 0
for x in range(number1 + 1):
for i in range(number2 + 1):
product = x * i
if (str(product) == str(product)[::-1]) and product > palindrome:
palindrome = product
return palindrome % 1337
n = 2
print(largestPalindrome(n))
|
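A rough way to see why this brute force exceeds the time limit within the stated range n in [1, 8]: the nested loops enumerate every pair up to the n-digit maximum, which is on the order of (10^n)^2 products. The small Python 3 sketch below (not part of the original solution) prints those counts; n = 1 or 2 is trivial, while n = 8 would require about 10^16 multiplications.

for n in (1, 2, 8):
    print(n, (10 ** n) ** 2)   # 1 -> 100, 2 -> 10000, 8 -> 10000000000000000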
|
c5e0fe1845f2f503f59279305c3a41e1d6b70aa8
|
localore/people/migrations/0010_auto_20160516_1123.py
|
localore/people/migrations/0010_auto_20160516_1123.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0009_auto_20160408_1535'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'ordering': ('last_name',), 'verbose_name': 'Team Member'},
),
]
|
Add missed migration for 12d09f5.
|
Add missed migration for 12d09f5.
|
Python
|
mpl-2.0
|
ghostwords/localore,ghostwords/localore,ghostwords/localore
|
Add missed migration for 12d09f5.
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0009_auto_20160408_1535'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'ordering': ('last_name',), 'verbose_name': 'Team Member'},
),
]
|
<commit_before><commit_msg>Add missed migration for 12d09f5.<commit_after>
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0009_auto_20160408_1535'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'ordering': ('last_name',), 'verbose_name': 'Team Member'},
),
]
|
Add missed migration for 12d09f5.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0009_auto_20160408_1535'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'ordering': ('last_name',), 'verbose_name': 'Team Member'},
),
]
|
<commit_before><commit_msg>Add missed migration for 12d09f5.<commit_after># -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('people', '0009_auto_20160408_1535'),
]
operations = [
migrations.AlterModelOptions(
name='person',
options={'ordering': ('last_name',), 'verbose_name': 'Team Member'},
),
]
|
|
d67d5fc6de19f2a1191e7289fcf8b96116eb1cf9
|
custom/icds/tests/test_views.py
|
custom/icds/tests/test_views.py
|
from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
class TestViews(TestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE='icds/login.html')
def test_custom_login(self):
response = self.client.get(reverse("login"), follow=False)
self.assertEqual(response.status_code, 200)
|
Test ICDS login doesn't 500
|
Test ICDS login doesn't 500
|
Python
|
bsd-3-clause
|
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
|
Test ICDS login doesn't 500
|
from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
class TestViews(TestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE='icds/login.html')
def test_custom_login(self):
response = self.client.get(reverse("login"), follow=False)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test ICDS login doesn't 500<commit_after>
|
from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
class TestViews(TestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE='icds/login.html')
def test_custom_login(self):
response = self.client.get(reverse("login"), follow=False)
self.assertEqual(response.status_code, 200)
|
Test ICDS login doesn't 500
from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
class TestViews(TestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE='icds/login.html')
def test_custom_login(self):
response = self.client.get(reverse("login"), follow=False)
self.assertEqual(response.status_code, 200)
|
<commit_before><commit_msg>Test ICDS login doesn't 500<commit_after>from __future__ import absolute_import
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
class TestViews(TestCase):
@override_settings(CUSTOM_LANDING_TEMPLATE='icds/login.html')
def test_custom_login(self):
response = self.client.get(reverse("login"), follow=False)
self.assertEqual(response.status_code, 200)
|
|
dd2b861cb1576943bb290bede5544eaec146bdbf
|
som_generationkwh/migrations/0.0.1.2/pre-0001_delete_inactive_investments.py
|
som_generationkwh/migrations/0.0.1.2/pre-0001_delete_inactive_investments.py
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def migrate(cr,v):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Migrate"
return
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.2: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Succesfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Down"
pass
# vim: ts=4 sw=4 et
|
Add second migration, it works!
|
Add second migration, it works!
|
Python
|
agpl-3.0
|
Som-Energia/somenergia-generationkwh,Som-Energia/somenergia-generationkwh
|
Add second migration, it works!
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def migrate(cr,v):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Migrate"
return
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.2: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Succesfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Down"
pass
# vim: ts=4 sw=4 et
|
<commit_before><commit_msg>Add second migration, it works!<commit_after>
|
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def migrate(cr,v):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Migrate"
return
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.2: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Succesfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Down"
pass
# vim: ts=4 sw=4 et
|
Add second migration, it works!
# coding=utf-8
from oopgrade import oopgrade
import netsvc
def migrate(cr,v):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Migrate"
return
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.2: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Succesfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Down"
pass
# vim: ts=4 sw=4 et
|
<commit_before><commit_msg>Add second migration, it works!<commit_after># coding=utf-8
from oopgrade import oopgrade
import netsvc
def migrate(cr,v):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Migrate"
return
def up(cursor, installed_version):
logger= netsvc.Logger()
print "somenergia-generationkwh_0.0.1.2: Hem entrat al UP"
if not installed_version:
return
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Changing ir_model_data from giscedata_facturacio_comer to giscedata_facturacio')
'''cursor.execute("delete
from generationkwh_investment
where id in (
select
inv.id as id
from
generationkwh_investment as inv
left join
account_move_line as ml
on
inv.move_line_id = ml.id
left join
account_period as p
on
p.id = ml.period_id
where
p.special and
not inv.active")'''
logger.notifyChannel('migration', netsvc.LOG_INFO, 'Succesfully changed')
def down(cursor):
print "somenergia-generationkwh_0.0.1.2: Hem entrat al Down"
pass
# vim: ts=4 sw=4 et
|
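Note on the migration above: the whole cursor.execute("delete ...") call sits inside a triple-quoted string literal, so the delete against generationkwh_investment is never sent to the database and the up() step only logs messages. A bare string literal is just an expression that gets discarded, as the short illustration below shows (it is not part of the original file):

'''cursor.execute("delete ...")'''            # a string expression; nothing runs
# cursor.execute("delete from generationkwh_investment where ...")  # this form would actually execute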
|
d44b79541dae38b8deabaab4e7fe990ce1cbfa95
|
opal/tests/test_templatetags_patient_lists.py
|
opal/tests/test_templatetags_patient_lists.py
|
"""
Unittests for opal.templatetags.patient_lists
"""
from mock import MagicMock
from opal.core.test import OpalTestCase
from opal.tests.test_patient_lists import TestTabbedPatientListGroup, TaggingTestPatientList
from opal.templatetags import patient_lists
class TabbedListGroupTestCase(OpalTestCase):
def test_context(self):
request = MagicMock(name='Mock Request')
request.user = self.user
mock_context = dict(
list_group=TestTabbedPatientListGroup,
patient_list=TaggingTestPatientList,
request=request
)
ctx = patient_lists.tabbed_list_group(mock_context)
self.assertEqual(TaggingTestPatientList, ctx['active_list'])
expected_members = list(TestTabbedPatientListGroup.get_member_lists_for_user(self.user))
self.assertEqual(expected_members, list(ctx['members']))
|
Add tests for patient lists templatetags
|
Add tests for patient lists templatetags
|
Python
|
agpl-3.0
|
khchine5/opal,khchine5/opal,khchine5/opal
|
Add tests for patient lists templatetags
|
"""
Unittests for opal.templatetags.patient_lists
"""
from mock import MagicMock
from opal.core.test import OpalTestCase
from opal.tests.test_patient_lists import TestTabbedPatientListGroup, TaggingTestPatientList
from opal.templatetags import patient_lists
class TabbedListGroupTestCase(OpalTestCase):
def test_context(self):
request = MagicMock(name='Mock Request')
request.user = self.user
mock_context = dict(
list_group=TestTabbedPatientListGroup,
patient_list=TaggingTestPatientList,
request=request
)
ctx = patient_lists.tabbed_list_group(mock_context)
self.assertEqual(TaggingTestPatientList, ctx['active_list'])
expected_members = list(TestTabbedPatientListGroup.get_member_lists_for_user(self.user))
self.assertEqual(expected_members, list(ctx['members']))
|
<commit_before><commit_msg>Add tests for patient lists templatetags<commit_after>
|
"""
Unittests for opal.templatetags.patient_lists
"""
from mock import MagicMock
from opal.core.test import OpalTestCase
from opal.tests.test_patient_lists import TestTabbedPatientListGroup, TaggingTestPatientList
from opal.templatetags import patient_lists
class TabbedListGroupTestCase(OpalTestCase):
def test_context(self):
request = MagicMock(name='Mock Request')
request.user = self.user
mock_context = dict(
list_group=TestTabbedPatientListGroup,
patient_list=TaggingTestPatientList,
request=request
)
ctx = patient_lists.tabbed_list_group(mock_context)
self.assertEqual(TaggingTestPatientList, ctx['active_list'])
expected_members = list(TestTabbedPatientListGroup.get_member_lists_for_user(self.user))
self.assertEqual(expected_members, list(ctx['members']))
|
Add tests for patient lists templatetags
"""
Unittests for opal.templatetags.patient_lists
"""
from mock import MagicMock
from opal.core.test import OpalTestCase
from opal.tests.test_patient_lists import TestTabbedPatientListGroup, TaggingTestPatientList
from opal.templatetags import patient_lists
class TabbedListGroupTestCase(OpalTestCase):
def test_context(self):
request = MagicMock(name='Mock Request')
request.user = self.user
mock_context = dict(
list_group=TestTabbedPatientListGroup,
patient_list=TaggingTestPatientList,
request=request
)
ctx = patient_lists.tabbed_list_group(mock_context)
self.assertEqual(TaggingTestPatientList, ctx['active_list'])
expected_members = list(TestTabbedPatientListGroup.get_member_lists_for_user(self.user))
self.assertEqual(expected_members, list(ctx['members']))
|
<commit_before><commit_msg>Add tests for patient lists templatetags<commit_after>"""
Unittests for opal.templatetags.patient_lists
"""
from mock import MagicMock
from opal.core.test import OpalTestCase
from opal.tests.test_patient_lists import TestTabbedPatientListGroup, TaggingTestPatientList
from opal.templatetags import patient_lists
class TabbedListGroupTestCase(OpalTestCase):
def test_context(self):
request = MagicMock(name='Mock Request')
request.user = self.user
mock_context = dict(
list_group=TestTabbedPatientListGroup,
patient_list=TaggingTestPatientList,
request=request
)
ctx = patient_lists.tabbed_list_group(mock_context)
self.assertEqual(TaggingTestPatientList, ctx['active_list'])
expected_members = list(TestTabbedPatientListGroup.get_member_lists_for_user(self.user))
self.assertEqual(expected_members, list(ctx['members']))
|
|
1a50184437e73b8e14e6cd3a35ff5d5dc0e53fd5
|
dna/test/test_DoublePinchHex.py
|
dna/test/test_DoublePinchHex.py
|
import components as comp
from model import DnaModel
#for plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
return num + (num%divisor)
#actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'other',
'cp': 1.5617, #kJ/kg*K
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
#plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title('Hot/cold flows through HEX - pinch: ' + str(round(result['dTmin'], 2)) + ' [K]')
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self
|
Add a broken test for a Double Pinch Heat Exchanger
|
Add a broken test for a Double Pinch Heat Exchanger
|
Python
|
bsd-3-clause
|
mwoc/pydna
|
Add a broken test for a Double Pinch Heat Exchanger
|
import components as comp
from model import DnaModel
#for plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
return num + (num%divisor)
#actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'other',
'cp': 1.5617, #kJ/kg*K
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
#plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title('Hot/cold flows through HEX - pinch: ' + str(round(result['dTmin'], 2)) + ' [K]')
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self
|
<commit_before><commit_msg>Add a broken test for a Double Pinch Heat Exchanger<commit_after>
|
import components as comp
from model import DnaModel
#for plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
return num + (num%divisor)
#actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'other',
'cp': 1.5617, #kJ/kg*K
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
#plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title('Hot/cold flows through HEX - pinch: ' + str(round(result['dTmin'], 2)) + ' [K]')
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self
|
Add a broken test for a Double Pinch Heat Exchanger
import components as comp
from model import DnaModel
#for plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
return num + (num%divisor)
#actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'other',
'cp': 1.5617, #kJ/kg*K
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
#plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title('Hot/cold flows through HEX - pinch: ' + str(round(result['dTmin'], 2)) + ' [K]')
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self
|
<commit_before><commit_msg>Add a broken test for a Double Pinch Heat Exchanger<commit_after>import components as comp
from model import DnaModel
#for plotting:
from numpy import linspace
import matplotlib.pyplot as plt
def round_down(num, divisor):
return num - (num%divisor)
def round_up(num, divisor):
return num + (num%divisor)
#actual test:
class DoublePinchHexTest(DnaModel):
def run(self):
heatex = self.addComponent(comp.PinchHex, 'heatex').nodes(1, 2, 3, 4)
self.nodes[1].update({
'media': 'other',
'cp': 1.5617, #kJ/kg*K
't': 430,
'p': 0.857
})
self.nodes[3].update({
'media': 'kalina',
'y': 0.7,
't': 85,
'p': 100,
'mdot': 1
})
heatex.calc(Nseg = 11, dTmin = 5)
return self
def plot(self):
print('Plotting...')
result = self.result['heatex']
#plot
x = linspace(0, 1, len(result['Th']))
miny = round_down(min(min(result['Tc']), min(result['Th']))-1, 10)
maxy = round_up(max(max(result['Tc']), max(result['Th']))+1, 10)
plt.plot(x, result['Th'], 'r->', label = 'Hot')
plt.plot(x, result['Tc'], 'b-<', label = 'Cold')
plt.xlabel('Location in HEX')
plt.ylabel(r'Temperature [$^\circ$C]')
plt.title('Hot/cold flows through HEX - pinch: ' + str(round(result['dTmin'], 2)) + ' [K]')
plt.ylim(miny, maxy)
plt.grid(True)
plt.savefig('../output/dblPinchHexTest.png')
plt.close()
return self
def analyse(self):
n = self.nodes
print('Hot inlet: ',n[1])
print('Hot outlet: ',n[2])
print('Hot mdot:', n[1]['mdot'], '(expected ~5.8)')
print('Energy difference: ', n[1]['mdot'] * (n[2]['h'] - n[1]['h']),' (expected -2245.094)')
print('Cold inlet: ',n[3])
print('Cold outlet: ',n[4])
print('Cold mdot:', n[3]['mdot'], '(expected ~1)')
print('Energy difference: ', n[3]['mdot'] * (n[4]['h'] - n[3]['h']),' (expected 2245.094)')
return self
|
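A small observation on the helpers at the top of the test above: round_up as written returns num + (num % divisor), so round_up(12, 10) gives 14 rather than 20. Because it is only used to pad the upper y-limit of the plot, the result still stays above the data, but a conventional round-up to the next multiple looks like the sketch below (an illustration, not part of the original test):

def round_up(num, divisor):
    # round num up to the nearest multiple of divisor, e.g. 12, 10 -> 20 and 20, 10 -> 20
    return num + (-num) % divisor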
|
fd6056cc8b226f2d89ef63cbc03b2b4966d06593
|
numba/tests/test_remove_dead.py
|
numba/tests/test_remove_dead.py
|
from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
x = 3
if b > 0:
y = z + w
else:
y = 0
a = 2 * x
return a < b
def null_func(a,b,c):
False
def findLhsAssign(func_ir, var):
for label, block in func_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name==var:
return True
return False
class TestRemoveDead(unittest.TestCase):
def test1(self):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
test_ir = compiler.run_frontend(test_will_propagate)
#print("Num blocks = ", len(test_ir.blocks))
#print(test_ir.dump())
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
args = (types.int64, types.int64, types.int64)
typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
#print("typemap = ", typemap)
#print("return_type = ", return_type)
type_annotation = type_annotations.TypeAnnotation(
func_ir=test_ir,
typemap=typemap,
calltypes=calltypes,
lifted=(),
lifted_from=None,
args=args,
return_type=return_type,
html_output=config.HTML)
remove_dels(test_ir.blocks)
in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
#print("in_cps = ", in_cps)
#print("out_cps = ", out_cps)
apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), null_func, None, typemap, calltypes)
#print(test_ir.dump())
#print("findAssign = ", findAssign(test_ir, "x"))
remove_dead(test_ir.blocks, test_ir.arg_names)
#print(test_ir.dump())
self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
|
Add test for dead code removal.
|
Add test for dead code removal.
|
Python
|
bsd-2-clause
|
stonebig/numba,stonebig/numba,sklam/numba,cpcloud/numba,jriehl/numba,IntelLabs/numba,jriehl/numba,sklam/numba,gmarkall/numba,gmarkall/numba,numba/numba,sklam/numba,IntelLabs/numba,stonebig/numba,stuartarchibald/numba,IntelLabs/numba,seibert/numba,gmarkall/numba,seibert/numba,cpcloud/numba,stuartarchibald/numba,cpcloud/numba,numba/numba,numba/numba,numba/numba,IntelLabs/numba,IntelLabs/numba,jriehl/numba,sklam/numba,stuartarchibald/numba,jriehl/numba,cpcloud/numba,cpcloud/numba,sklam/numba,gmarkall/numba,seibert/numba,stonebig/numba,numba/numba,stuartarchibald/numba,jriehl/numba,gmarkall/numba,seibert/numba,stonebig/numba,stuartarchibald/numba,seibert/numba
|
Add test for dead code removal.
|
from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
x = 3
if b > 0:
y = z + w
else:
y = 0
a = 2 * x
return a < b
def null_func(a,b,c):
False
def findLhsAssign(func_ir, var):
for label, block in func_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name==var:
return True
return False
class TestRemoveDead(unittest.TestCase):
def test1(self):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
test_ir = compiler.run_frontend(test_will_propagate)
#print("Num blocks = ", len(test_ir.blocks))
#print(test_ir.dump())
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
args = (types.int64, types.int64, types.int64)
typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
#print("typemap = ", typemap)
#print("return_type = ", return_type)
type_annotation = type_annotations.TypeAnnotation(
func_ir=test_ir,
typemap=typemap,
calltypes=calltypes,
lifted=(),
lifted_from=None,
args=args,
return_type=return_type,
html_output=config.HTML)
remove_dels(test_ir.blocks)
in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
#print("in_cps = ", in_cps)
#print("out_cps = ", out_cps)
apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), null_func, None, typemap, calltypes)
#print(test_ir.dump())
#print("findAssign = ", findAssign(test_ir, "x"))
remove_dead(test_ir.blocks, test_ir.arg_names)
#print(test_ir.dump())
self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for dead code removal.<commit_after>
|
from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
x = 3
if b > 0:
y = z + w
else:
y = 0
a = 2 * x
return a < b
def null_func(a,b,c):
False
def findLhsAssign(func_ir, var):
for label, block in func_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name==var:
return True
return False
class TestRemoveDead(unittest.TestCase):
def test1(self):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
test_ir = compiler.run_frontend(test_will_propagate)
#print("Num blocks = ", len(test_ir.blocks))
#print(test_ir.dump())
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
args = (types.int64, types.int64, types.int64)
typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
#print("typemap = ", typemap)
#print("return_type = ", return_type)
type_annotation = type_annotations.TypeAnnotation(
func_ir=test_ir,
typemap=typemap,
calltypes=calltypes,
lifted=(),
lifted_from=None,
args=args,
return_type=return_type,
html_output=config.HTML)
remove_dels(test_ir.blocks)
in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
#print("in_cps = ", in_cps)
#print("out_cps = ", out_cps)
apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), null_func, None, typemap, calltypes)
#print(test_ir.dump())
#print("findAssign = ", findAssign(test_ir, "x"))
remove_dead(test_ir.blocks, test_ir.arg_names)
#print(test_ir.dump())
self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
|
Add test for dead code removal.from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
x = 3
if b > 0:
y = z + w
else:
y = 0
a = 2 * x
return a < b
def null_func(a,b,c):
False
def findLhsAssign(func_ir, var):
for label, block in func_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name==var:
return True
return False
class TestRemoveDead(unittest.TestCase):
def test1(self):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
test_ir = compiler.run_frontend(test_will_propagate)
#print("Num blocks = ", len(test_ir.blocks))
#print(test_ir.dump())
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
args = (types.int64, types.int64, types.int64)
typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
#print("typemap = ", typemap)
#print("return_type = ", return_type)
type_annotation = type_annotations.TypeAnnotation(
func_ir=test_ir,
typemap=typemap,
calltypes=calltypes,
lifted=(),
lifted_from=None,
args=args,
return_type=return_type,
html_output=config.HTML)
remove_dels(test_ir.blocks)
in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
#print("in_cps = ", in_cps)
#print("out_cps = ", out_cps)
apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), null_func, None, typemap, calltypes)
#print(test_ir.dump())
#print("findAssign = ", findAssign(test_ir, "x"))
remove_dead(test_ir.blocks, test_ir.arg_names)
#print(test_ir.dump())
self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
|
<commit_before><commit_msg>Add test for dead code removal.<commit_after>from numba import compiler, typing
from numba.targets import cpu
from numba import types
from numba.targets.registry import cpu_target
from numba import config
from numba.annotations import type_annotations
from numba.ir_utils import copy_propagate, apply_copy_propagate, get_name_var_table, remove_dels, remove_dead
from numba import ir
from numba import unittest_support as unittest
def test_will_propagate(b, z, w):
x = 3
if b > 0:
y = z + w
else:
y = 0
a = 2 * x
return a < b
def null_func(a,b,c):
False
def findLhsAssign(func_ir, var):
for label, block in func_ir.blocks.items():
for i, inst in enumerate(block.body):
if isinstance(inst, ir.Assign) and inst.target.name==var:
return True
return False
class TestRemoveDead(unittest.TestCase):
def test1(self):
typingctx = typing.Context()
targetctx = cpu.CPUContext(typingctx)
test_ir = compiler.run_frontend(test_will_propagate)
#print("Num blocks = ", len(test_ir.blocks))
#print(test_ir.dump())
with cpu_target.nested_context(typingctx, targetctx):
typingctx.refresh()
targetctx.refresh()
args = (types.int64, types.int64, types.int64)
typemap, return_type, calltypes = compiler.type_inference_stage(typingctx, test_ir, args, None)
#print("typemap = ", typemap)
#print("return_type = ", return_type)
type_annotation = type_annotations.TypeAnnotation(
func_ir=test_ir,
typemap=typemap,
calltypes=calltypes,
lifted=(),
lifted_from=None,
args=args,
return_type=return_type,
html_output=config.HTML)
remove_dels(test_ir.blocks)
in_cps, out_cps = copy_propagate(test_ir.blocks, typemap)
#print("in_cps = ", in_cps)
#print("out_cps = ", out_cps)
apply_copy_propagate(test_ir.blocks, in_cps, get_name_var_table(test_ir.blocks), null_func, None, typemap, calltypes)
#print(test_ir.dump())
#print("findAssign = ", findAssign(test_ir, "x"))
remove_dead(test_ir.blocks, test_ir.arg_names)
#print(test_ir.dump())
self.assertFalse(findLhsAssign(test_ir, "x"))
if __name__ == "__main__":
unittest.main()
|
|
31a6433ee74d8d13ba3035a41db8e925de1d2b5d
|
bin/2000/crosswalk_msa_tract.py
|
bin/2000/crosswalk_msa_tract.py
|
"""crosswalk_msa_tract.py
Extract the crosswalk between 2000 msa and tracts.
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: {msa: [cousub ids]}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all tracts ids
tracts = []
for st in states:
path = 'data/2000/shp/states/%s/tracts.shp'%st
with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for feat in f:
            tracts.append(feat['properties']['CTIDFP00'])
#
# Group by MSA
#
msa_tract = {}
for tr in tracts:
county = tr[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_tract:
msa_tract[msa] = []
msa_tract[msa].append(tr)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_tract.csv', 'w') as output:
output.write('MSA FIP\tTRACT FIP\n')
for msa in msa_tract:
## Remove duplicates
trs = list(set(msa_tract[msa]))
for tr in trs:
output.write('%s\t%s\n'%(msa, tr))
|
Add script to extract the crosswalk between msas and tracts
|
Add script to extract the crosswalk between msas and tracts
|
Python
|
bsd-2-clause
|
scities/2000-us-metro-atlas
|
Add script to extract the crosswalk between msas and tracts
|
"""crosswalk_msa_tract.py
Extract the crosswalk between 2000 msa and tracts.
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: {msa: [cousub ids]}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all tracts ids
tracts = []
for st in states:
path = 'data/2000/shp/states/%s/tracts.shp'%st
with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for feat in f:
            tracts.append(feat['properties']['CTIDFP00'])
#
# Group by MSA
#
msa_tract = {}
for tr in tracts:
county = tr[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_tract:
msa_tract[msa] = []
msa_tract[msa].append(tr)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_tract.csv', 'w') as output:
output.write('MSA FIP\tTRACT FIP\n')
for msa in msa_tract:
## Remove duplicates
trs = list(set(msa_tract[msa]))
for tr in trs:
output.write('%s\t%s\n'%(msa, tr))
|
<commit_before><commit_msg>Add script to extract the crosswalk between msas and tracts<commit_after>
|
"""crosswalk_msa_tract.py
Extract the crosswalk between 2000 msa and tracts.
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: {msa: [cousub ids]}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all tracts ids
tracts = []
for st in states:
path = 'data/2000/shp/states/%s/tracts.shp'%st
with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for feat in f:
            tracts.append(feat['properties']['CTIDFP00'])
#
# Group by MSA
#
msa_tract = {}
for tr in tracts:
county = tr[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_tract:
msa_tract[msa] = []
msa_tract[msa].append(tr)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_tract.csv', 'w') as output:
output.write('MSA FIP\tTRACT FIP\n')
for msa in msa_tract:
## Remove duplicates
trs = list(set(msa_tract[msa]))
for tr in trs:
output.write('%s\t%s\n'%(msa, tr))
|
Add script to extract the crosswalk between msas and tracts"""crosswalk_msa_tract.py
Extract the crosswalk between 2000 msa and tracts.
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: {msa: [cousub ids]}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all tracts ids
tracts = []
for st in states:
path = 'data/2000/shp/states/%s/tracts.shp'%st
with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for feat in f:
            tracts.append(feat['properties']['CTIDFP00'])
#
# Group by MSA
#
msa_tract = {}
for tr in tracts:
county = tr[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_tract:
msa_tract[msa] = []
msa_tract[msa].append(tr)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_tract.csv', 'w') as output:
output.write('MSA FIP\tTRACT FIP\n')
for msa in msa_tract:
## Remove duplicates
trs = list(set(msa_tract[msa]))
for tr in trs:
output.write('%s\t%s\n'%(msa, tr))
|
<commit_before><commit_msg>Add script to extract the crosswalk between msas and tracts<commit_after>"""crosswalk_msa_tract.py
Extract the crosswalk between 2000 msa and tracts.
"""
import os
import csv
import fiona
#
# Import data
#
## MSA to counties crosswalk
# county_to_msa = {county: {msa: [cousub ids]}
county_to_msa = {}
with open('data/2000/crosswalks/msa_county.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
county = rows[1]
msa = rows[0]
county_to_msa[county] = msa
## Import list of states
states = []
with open('data/state_numbers.csv', 'r') as source:
reader = csv.reader(source, delimiter='\t')
reader.next()
for rows in reader:
states.append(rows[0])
## Import all tracts ids
tracts = []
for st in states:
path = 'data/2000/shp/states/%s/tracts.shp'%st
with fiona.open(path, 'r', 'ESRI Shapefile') as f:
        for feat in f:
            tracts.append(feat['properties']['CTIDFP00'])
#
# Group by MSA
#
msa_tract = {}
for tr in tracts:
county = tr[:5]
if county in county_to_msa:
msa = county_to_msa[county]
if msa not in msa_tract:
msa_tract[msa] = []
msa_tract[msa].append(tr)
#
# Save the crosswalk
#
with open('data/2000/crosswalks/msa_tract.csv', 'w') as output:
output.write('MSA FIP\tTRACT FIP\n')
for msa in msa_tract:
## Remove duplicates
trs = list(set(msa_tract[msa]))
for tr in trs:
output.write('%s\t%s\n'%(msa, tr))
|
|
0fbea33e99af8fbb30ba508b649ec0e898a6cc98
|
examples/work_queue.py
|
examples/work_queue.py
|
import threading
import time
from walrus import Walrus
db = Walrus()
def create_consumer_group():
consumer = db.consumer_group('tasks-cg', ['tasks'])
if not db.exists('tasks'):
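        # Stream must exist before the consumer group can be created, so seed it with a dummy entry.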
db.xadd('tasks', {'dummy': ''}, id=b'0-1')
consumer.create()
consumer.set_id('$')
return consumer
def worker(tid, consumer, stop_signal):
while not stop_signal.is_set():
messages = consumer.tasks.read(count=1, timeout=1000)
if messages is not None:
message_id, data = messages[0]
print('worker %s processing: %s' % (tid, data))
consumer.tasks.ack(message_id)
def main():
consumer = create_consumer_group()
stream = consumer.tasks
stop_signal = threading.Event()
workers = []
for i in range(4):
worker_t = threading.Thread(target=worker,
args=(i + 1, consumer, stop_signal))
worker_t.daemon = True
workers.append(worker_t)
print('Seeding stream with 10 events')
for i in range(10):
stream.add({'data': 'event %s' % i})
print('Starting worker pool')
for worker_t in workers:
worker_t.start()
print('Adding 20 more messages, 4 per second')
for i in range(10, 30):
print('Adding event %s' % i)
stream.add({'data': 'event %s' % i})
time.sleep(0.25)
stop_signal.set()
[t.join() for t in workers]
if __name__ == '__main__':
main()
|
Add work queue example using streams.
|
Add work queue example using streams.
|
Python
|
mit
|
coleifer/walrus
|
Add work queue example using streams.
|
import threading
import time
from walrus import Walrus
db = Walrus()
def create_consumer_group():
consumer = db.consumer_group('tasks-cg', ['tasks'])
if not db.exists('tasks'):
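        # Stream must exist before the consumer group can be created, so seed it with a dummy entry.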
db.xadd('tasks', {'dummy': ''}, id=b'0-1')
consumer.create()
consumer.set_id('$')
return consumer
def worker(tid, consumer, stop_signal):
while not stop_signal.is_set():
messages = consumer.tasks.read(count=1, timeout=1000)
if messages is not None:
message_id, data = messages[0]
print('worker %s processing: %s' % (tid, data))
consumer.tasks.ack(message_id)
def main():
consumer = create_consumer_group()
stream = consumer.tasks
stop_signal = threading.Event()
workers = []
for i in range(4):
worker_t = threading.Thread(target=worker,
args=(i + 1, consumer, stop_signal))
worker_t.daemon = True
workers.append(worker_t)
print('Seeding stream with 10 events')
for i in range(10):
stream.add({'data': 'event %s' % i})
print('Starting worker pool')
for worker_t in workers:
worker_t.start()
print('Adding 20 more messages, 4 per second')
for i in range(10, 30):
print('Adding event %s' % i)
stream.add({'data': 'event %s' % i})
time.sleep(0.25)
stop_signal.set()
[t.join() for t in workers]
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add work queue example using streams.<commit_after>
|
import threading
import time
from walrus import Walrus
db = Walrus()
def create_consumer_group():
consumer = db.consumer_group('tasks-cg', ['tasks'])
if not db.exists('tasks'):
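        # Stream must exist before the consumer group can be created, so seed it with a dummy entry.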
db.xadd('tasks', {'dummy': ''}, id=b'0-1')
consumer.create()
consumer.set_id('$')
return consumer
def worker(tid, consumer, stop_signal):
while not stop_signal.is_set():
messages = consumer.tasks.read(count=1, timeout=1000)
if messages is not None:
message_id, data = messages[0]
print('worker %s processing: %s' % (tid, data))
consumer.tasks.ack(message_id)
def main():
consumer = create_consumer_group()
stream = consumer.tasks
stop_signal = threading.Event()
workers = []
for i in range(4):
worker_t = threading.Thread(target=worker,
args=(i + 1, consumer, stop_signal))
worker_t.daemon = True
workers.append(worker_t)
print('Seeding stream with 10 events')
for i in range(10):
stream.add({'data': 'event %s' % i})
print('Starting worker pool')
for worker_t in workers:
worker_t.start()
print('Adding 20 more messages, 4 per second')
for i in range(10, 30):
print('Adding event %s' % i)
stream.add({'data': 'event %s' % i})
time.sleep(0.25)
stop_signal.set()
[t.join() for t in workers]
if __name__ == '__main__':
main()
|
Add work queue example using streams.import threading
import time
from walrus import Walrus
db = Walrus()
def create_consumer_group():
consumer = db.consumer_group('tasks-cg', ['tasks'])
if not db.exists('tasks'):
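        # Stream must exist before the consumer group can be created, so seed it with a dummy entry.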
db.xadd('tasks', {'dummy': ''}, id=b'0-1')
consumer.create()
consumer.set_id('$')
return consumer
def worker(tid, consumer, stop_signal):
while not stop_signal.is_set():
messages = consumer.tasks.read(count=1, timeout=1000)
if messages is not None:
message_id, data = messages[0]
print('worker %s processing: %s' % (tid, data))
consumer.tasks.ack(message_id)
def main():
consumer = create_consumer_group()
stream = consumer.tasks
stop_signal = threading.Event()
workers = []
for i in range(4):
worker_t = threading.Thread(target=worker,
args=(i + 1, consumer, stop_signal))
worker_t.daemon = True
workers.append(worker_t)
print('Seeding stream with 10 events')
for i in range(10):
stream.add({'data': 'event %s' % i})
print('Starting worker pool')
for worker_t in workers:
worker_t.start()
print('Adding 20 more messages, 4 per second')
for i in range(10, 30):
print('Adding event %s' % i)
stream.add({'data': 'event %s' % i})
time.sleep(0.25)
stop_signal.set()
[t.join() for t in workers]
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add work queue example using streams.<commit_after>import threading
import time
from walrus import Walrus
db = Walrus()
def create_consumer_group():
consumer = db.consumer_group('tasks-cg', ['tasks'])
if not db.exists('tasks'):
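        # Stream must exist before the consumer group can be created, so seed it with a dummy entry.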
db.xadd('tasks', {'dummy': ''}, id=b'0-1')
consumer.create()
consumer.set_id('$')
return consumer
def worker(tid, consumer, stop_signal):
while not stop_signal.is_set():
messages = consumer.tasks.read(count=1, timeout=1000)
if messages is not None:
message_id, data = messages[0]
print('worker %s processing: %s' % (tid, data))
consumer.tasks.ack(message_id)
def main():
consumer = create_consumer_group()
stream = consumer.tasks
stop_signal = threading.Event()
workers = []
for i in range(4):
worker_t = threading.Thread(target=worker,
args=(i + 1, consumer, stop_signal))
worker_t.daemon = True
workers.append(worker_t)
print('Seeding stream with 10 events')
for i in range(10):
stream.add({'data': 'event %s' % i})
print('Starting worker pool')
for worker_t in workers:
worker_t.start()
print('Adding 20 more messages, 4 per second')
for i in range(10, 30):
print('Adding event %s' % i)
stream.add({'data': 'event %s' % i})
time.sleep(0.25)
stop_signal.set()
[t.join() for t in workers]
if __name__ == '__main__':
main()
|
|
99340e34e219101ad21acdff22665a3fdbf5a64b
|
gravity/migrations/0003_ispindelconfiguration_temperature_correction.py
|
gravity/migrations/0003_ispindelconfiguration_temperature_correction.py
|
# Generated by Django 3.0.11 on 2020-11-27 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_brewfather_updates'),
]
operations = [
migrations.AddField(
model_name='ispindelconfiguration',
name='temperature_correction',
field=models.FloatField(default=0.0, help_text='Value to correct iSpindel temperature value with '),
),
]
|
Add migration for iSpindel temperature correction
|
Add migration for iSpindel temperature correction
|
Python
|
mit
|
thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack,thorrak/fermentrack
|
Add migration for iSpindel temperature correction
|
# Generated by Django 3.0.11 on 2020-11-27 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_brewfather_updates'),
]
operations = [
migrations.AddField(
model_name='ispindelconfiguration',
name='temperature_correction',
field=models.FloatField(default=0.0, help_text='Value to correct iSpindel temperature value with '),
),
]
|
<commit_before><commit_msg>Add migration for iSpindel temperature correction<commit_after>
|
# Generated by Django 3.0.11 on 2020-11-27 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_brewfather_updates'),
]
operations = [
migrations.AddField(
model_name='ispindelconfiguration',
name='temperature_correction',
field=models.FloatField(default=0.0, help_text='Value to correct iSpindel temperature value with '),
),
]
|
Add migration for iSpindel temperature correction# Generated by Django 3.0.11 on 2020-11-27 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_brewfather_updates'),
]
operations = [
migrations.AddField(
model_name='ispindelconfiguration',
name='temperature_correction',
field=models.FloatField(default=0.0, help_text='Value to correct iSpindel temperature value with '),
),
]
|
<commit_before><commit_msg>Add migration for iSpindel temperature correction<commit_after># Generated by Django 3.0.11 on 2020-11-27 19:22
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('gravity', '0002_brewfather_updates'),
]
operations = [
migrations.AddField(
model_name='ispindelconfiguration',
name='temperature_correction',
field=models.FloatField(default=0.0, help_text='Value to correct iSpindel temperature value with '),
),
]
|
|
7570120ee325e1b0595b39850dba11e4cce7629d
|
src/promo/__init__.py
|
src/promo/__init__.py
|
from contextlib import contextmanager
from email.mime.text import MIMEText
from email.utils import formatdate
from cfg import config, parse_end_date
import os
import re
from teammails import base_path, get_template, smtp_session
def address_iterator(filename):
    if not os.path.isfile(filename):
        raise Exception("File not found: %s!" % filename)
    splitter = re.compile(r"\s*[,; ]\s*")
    # Plain generator (not a context manager) so send_spam() can iterate it directly.
    with open(filename, "r") as fn:
        for line in fn:
            for addr in splitter.split(line.strip()):
                yield addr
def send_spam(address_file, debug=True):
template = get_template("spammail", path=base_path(__file__))
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
register_end = parse_end_date(config.REGISTER_END)
data = dict(event_date=config.EVENT_DATE,
volume=config.VOLUME,
register_end_date=register_end.strftime("%d.%m.%Y"),
pretty_event_date=config.EVENT_DATE_PRETTY,
address=None)
subject = "Einladung zum %d. meet&eat am %s" % (config.VOLUME, config.EVENT_DATE_PRETTY)
with smtp_session() as session:
print "Send Mails ",
i = 0
for address in address_iterator(address_file):
data["address"] = address
content = template.render(**data)
recpt = address
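            # In debug mode, deliver to the default sender address instead of the real recipient.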
if debug:
recpt = envelope
msg = MIMEText(content, "plain", "utf8")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = recpt
msg["Date"] = formatdate(localtime=True)
session.sendmail(envelope, [recpt], msg.as_string())
i += 1
print ".",
print " Done - %d Mails sent" % i
|
Add code to send promotional mails
|
Add code to send promotional mails
|
Python
|
bsd-3-clause
|
janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,eXma/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system,janLo/meet-and-eat-registration-system
|
Add code to send promotional mails
|
from contextlib import contextmanager
from email.mime.text import MIMEText
from email.utils import formatdate
from cfg import config, parse_end_date
import os
import re
from teammails import base_path, get_template, smtp_session
def address_iterator(filename):
    if not os.path.isfile(filename):
        raise Exception("File not found: %s!" % filename)
    splitter = re.compile(r"\s*[,; ]\s*")
    # Plain generator (not a context manager) so send_spam() can iterate it directly.
    with open(filename, "r") as fn:
        for line in fn:
            for addr in splitter.split(line.strip()):
                yield addr
def send_spam(address_file, debug=True):
template = get_template("spammail", path=base_path(__file__))
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
register_end = parse_end_date(config.REGISTER_END)
data = dict(event_date=config.EVENT_DATE,
volume=config.VOLUME,
register_end_date=register_end.strftime("%d.%m.%Y"),
pretty_event_date=config.EVENT_DATE_PRETTY,
address=None)
subject = "Einladung zum %d. meet&eat am %s" % (config.VOLUME, config.EVENT_DATE_PRETTY)
with smtp_session() as session:
print "Send Mails ",
i = 0
for address in address_iterator(address_file):
data["address"] = address
content = template.render(**data)
recpt = address
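            # In debug mode, deliver to the default sender address instead of the real recipient.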
if debug:
recpt = envelope
msg = MIMEText(content, "plain", "utf8")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = recpt
msg["Date"] = formatdate(localtime=True)
session.sendmail(envelope, [recpt], msg.as_string())
i += 1
print ".",
print " Done - %d Mails sent" % i
|
<commit_before><commit_msg>Add code to send promotional mails<commit_after>
|
from contextlib import contextmanager
from email.mime.text import MIMEText
from email.utils import formatdate
from cfg import config, parse_end_date
import os
import re
from teammails import base_path, get_template, smtp_session
def address_iterator(filename):
    if not os.path.isfile(filename):
        raise Exception("File not found: %s!" % filename)
    splitter = re.compile(r"\s*[,; ]\s*")
    # Plain generator (not a context manager) so send_spam() can iterate it directly.
    with open(filename, "r") as fn:
        for line in fn:
            for addr in splitter.split(line.strip()):
                yield addr
def send_spam(address_file, debug=True):
template = get_template("spammail", path=base_path(__file__))
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
register_end = parse_end_date(config.REGISTER_END)
data = dict(event_date=config.EVENT_DATE,
volume=config.VOLUME,
register_end_date=register_end.strftime("%d.%m.%Y"),
pretty_event_date=config.EVENT_DATE_PRETTY,
address=None)
subject = "Einladung zum %d. meet&eat am %s" % (config.VOLUME, config.EVENT_DATE_PRETTY)
with smtp_session() as session:
print "Send Mails ",
i = 0
for address in address_iterator(address_file):
data["address"] = address
content = template.render(**data)
recpt = address
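            # In debug mode, deliver to the default sender address instead of the real recipient.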
if debug:
recpt = envelope
msg = MIMEText(content, "plain", "utf8")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = recpt
msg["Date"] = formatdate(localtime=True)
session.sendmail(envelope, [recpt], msg.as_string())
i += 1
print ".",
print " Done - %d Mails sent" % i
|
Add code to send promotional mailsfrom contextlib import contextmanager
from email.mime.text import MIMEText
from email.utils import formatdate
from cfg import config, parse_end_date
import os
import re
from teammails import base_path, get_template, smtp_session
def address_iterator(filename):
    if not os.path.isfile(filename):
        raise Exception("File not found: %s!" % filename)
    splitter = re.compile(r"\s*[,; ]\s*")
    # Plain generator (not a context manager) so send_spam() can iterate it directly.
    with open(filename, "r") as fn:
        for line in fn:
            for addr in splitter.split(line.strip()):
                yield addr
def send_spam(address_file, debug=True):
template = get_template("spammail", path=base_path(__file__))
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
register_end = parse_end_date(config.REGISTER_END)
data = dict(event_date=config.EVENT_DATE,
volume=config.VOLUME,
register_end_date=register_end.strftime("%d.%m.%Y"),
pretty_event_date=config.EVENT_DATE_PRETTY,
address=None)
subject = "Einladung zum %d. meet&eat am %s" % (config.VOLUME, config.EVENT_DATE_PRETTY)
with smtp_session() as session:
print "Send Mails ",
i = 0
for address in address_iterator(address_file):
data["address"] = address
content = template.render(**data)
recpt = address
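            # In debug mode, deliver to the default sender address instead of the real recipient.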
if debug:
recpt = envelope
msg = MIMEText(content, "plain", "utf8")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = recpt
msg["Date"] = formatdate(localtime=True)
session.sendmail(envelope, [recpt], msg.as_string())
i += 1
print ".",
print " Done - %d Mails sent" % i
|
<commit_before><commit_msg>Add code to send promotional mails<commit_after>from contextlib import contextmanager
from email.mime.text import MIMEText
from email.utils import formatdate
from cfg import config, parse_end_date
import os
import re
from teammails import base_path, get_template, smtp_session
def address_iterator(filename):
    if not os.path.isfile(filename):
        raise Exception("File not found: %s!" % filename)
    splitter = re.compile(r"\s*[,; ]\s*")
    # Plain generator (not a context manager) so send_spam() can iterate it directly.
    with open(filename, "r") as fn:
        for line in fn:
            for addr in splitter.split(line.strip()):
                yield addr
def send_spam(address_file, debug=True):
template = get_template("spammail", path=base_path(__file__))
sender = "meet&eat Orga <%s>" % config.MAIL_DEFAULT_SENDER
envelope = config.MAIL_DEFAULT_SENDER
register_end = parse_end_date(config.REGISTER_END)
data = dict(event_date=config.EVENT_DATE,
volume=config.VOLUME,
register_end_date=register_end.strftime("%d.%m.%Y"),
pretty_event_date=config.EVENT_DATE_PRETTY,
address=None)
subject = "Einladung zum %d. meet&eat am %s" % (config.VOLUME, config.EVENT_DATE_PRETTY)
with smtp_session() as session:
print "Send Mails ",
i = 0
for address in address_iterator(address_file):
data["address"] = address
content = template.render(**data)
recpt = address
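            # In debug mode, deliver to the default sender address instead of the real recipient.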
if debug:
recpt = envelope
msg = MIMEText(content, "plain", "utf8")
msg["Subject"] = subject
msg["From"] = sender
msg["To"] = recpt
msg["Date"] = formatdate(localtime=True)
session.sendmail(envelope, [recpt], msg.as_string())
i += 1
print ".",
print " Done - %d Mails sent" % i
|
|
0b8bbb5def7d5f621b26093897b01ac9a14239bf
|
src/tor/x25519-gen.py
|
src/tor/x25519-gen.py
|
#!/usr/bin/env python3
import base64
try:
import nacl.public
except ImportError:
print('PyNaCl is required: "pip install pynacl" or similar')
exit(1)
def key_str(key):
# bytes to base 32
key_bytes = bytes(key)
key_b32 = base64.b32encode(key_bytes)
# strip trailing ====
assert key_b32[-4:] == b'===='
key_b32 = key_b32[:-4]
# change from b'ASDF' to ASDF
s = key_b32.decode('utf-8')
return s
def main():
priv_key = nacl.public.PrivateKey.generate()
pub_key = priv_key.public_key
    print('public: %s' % key_str(pub_key))
    print('private: %s' % key_str(priv_key))
if __name__ == '__main__':
exit(main())
|
Add script for generating x25519 keys
|
Add script for generating x25519 keys
|
Python
|
unlicense
|
pastly/python-snippits
|
Add script for generating x25519 keys
|
#!/usr/bin/env python3
import base64
try:
import nacl.public
except ImportError:
print('PyNaCl is required: "pip install pynacl" or similar')
exit(1)
def key_str(key):
# bytes to base 32
key_bytes = bytes(key)
key_b32 = base64.b32encode(key_bytes)
# strip trailing ====
assert key_b32[-4:] == b'===='
key_b32 = key_b32[:-4]
# change from b'ASDF' to ASDF
s = key_b32.decode('utf-8')
return s
def main():
priv_key = nacl.public.PrivateKey.generate()
pub_key = priv_key.public_key
    print('public: %s' % key_str(pub_key))
    print('private: %s' % key_str(priv_key))
if __name__ == '__main__':
exit(main())
|
<commit_before><commit_msg>Add script for generating x25519 keys<commit_after>
|
#!/usr/bin/env python3
import base64
try:
import nacl.public
except ImportError:
print('PyNaCl is required: "pip install pynacl" or similar')
exit(1)
def key_str(key):
# bytes to base 32
key_bytes = bytes(key)
key_b32 = base64.b32encode(key_bytes)
# strip trailing ====
assert key_b32[-4:] == b'===='
key_b32 = key_b32[:-4]
# change from b'ASDF' to ASDF
s = key_b32.decode('utf-8')
return s
def main():
priv_key = nacl.public.PrivateKey.generate()
pub_key = priv_key.public_key
    print('public: %s' % key_str(pub_key))
    print('private: %s' % key_str(priv_key))
if __name__ == '__main__':
exit(main())
|
Add script for generating x25519 keys#!/usr/bin/env python3
import base64
try:
import nacl.public
except ImportError:
print('PyNaCl is required: "pip install pynacl" or similar')
exit(1)
def key_str(key):
# bytes to base 32
key_bytes = bytes(key)
key_b32 = base64.b32encode(key_bytes)
# strip trailing ====
assert key_b32[-4:] == b'===='
key_b32 = key_b32[:-4]
# change from b'ASDF' to ASDF
s = key_b32.decode('utf-8')
return s
def main():
priv_key = nacl.public.PrivateKey.generate()
pub_key = priv_key.public_key
    print('public: %s' % key_str(pub_key))
    print('private: %s' % key_str(priv_key))
if __name__ == '__main__':
exit(main())
|
<commit_before><commit_msg>Add script for generating x25519 keys<commit_after>#!/usr/bin/env python3
import base64
try:
import nacl.public
except ImportError:
print('PyNaCl is required: "pip install pynacl" or similar')
exit(1)
def key_str(key):
# bytes to base 32
key_bytes = bytes(key)
key_b32 = base64.b32encode(key_bytes)
# strip trailing ====
assert key_b32[-4:] == b'===='
key_b32 = key_b32[:-4]
# change from b'ASDF' to ASDF
s = key_b32.decode('utf-8')
return s
def main():
priv_key = nacl.public.PrivateKey.generate()
pub_key = priv_key.public_key
    print('public: %s' % key_str(pub_key))
    print('private: %s' % key_str(priv_key))
if __name__ == '__main__':
exit(main())
|
|
0135a28e5e4fc1183737e3d5bade231d7330d564
|
scripts/run_on_swarming_bots/reboot_bot.py
|
scripts/run_on_swarming_bots/reboot_bot.py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reboot a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system('shutdown /r /t 0')
else:
os.system('sudo reboot')
|
Add Reboot script for run_on_swarming_bots
|
Add Reboot script for run_on_swarming_bots
Change-Id: I01c72ef2157660e5d7f3c5d47a9b30fa78054e9f
Reviewed-on: https://skia-review.googlesource.com/135381
Auto-Submit: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
|
Python
|
bsd-3-clause
|
google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot,google/skia-buildbot
|
Add Reboot script for run_on_swarming_bots
Change-Id: I01c72ef2157660e5d7f3c5d47a9b30fa78054e9f
Reviewed-on: https://skia-review.googlesource.com/135381
Auto-Submit: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reboot a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system('shutdown /r /t 0')
else:
os.system('sudo reboot')
|
<commit_before><commit_msg>Add Reboot script for run_on_swarming_bots
Change-Id: I01c72ef2157660e5d7f3c5d47a9b30fa78054e9f
Reviewed-on: https://skia-review.googlesource.com/135381
Auto-Submit: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com><commit_after>
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reboot a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system('shutdown /r /t 0')
else:
os.system('sudo reboot')
|
Add Reboot script for run_on_swarming_bots
Change-Id: I01c72ef2157660e5d7f3c5d47a9b30fa78054e9f
Reviewed-on: https://skia-review.googlesource.com/135381
Auto-Submit: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reboot a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system('shutdown /r /t 0')
else:
os.system('sudo reboot')
|
<commit_before><commit_msg>Add Reboot script for run_on_swarming_bots
Change-Id: I01c72ef2157660e5d7f3c5d47a9b30fa78054e9f
Reviewed-on: https://skia-review.googlesource.com/135381
Auto-Submit: Ben Wagner <3ef7217be91069877d94f7907ce5479000772cd3@google.com>
Commit-Queue: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com>
Reviewed-by: Kevin Lubick <7cdab2cfab351f23814786ba39716e90eed69047@google.com><commit_after>#!/usr/bin/env python
#
# Copyright 2017 Google Inc.
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Reboot a Swarming bot."""
import os
import sys
if sys.platform == 'win32':
os.system('shutdown /r /t 0')
else:
os.system('sudo reboot')
|
|
372fdf61b30041862383b16d2c0ca4ed84d530e4
|
troposphere/s3.py
|
troposphere/s3.py
|
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
}
class Bucket(AWSObject):
props = {
'AccessControl': (basestring, False),
'Tags': (Tags, False),
'WebsiteConfiguration': (WebsiteConfiguration, False)
}
access_control_types = [
'Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl',
]
def __init__(self, name, **kwargs):
self.type = "AWS::S3::Bucket"
sup = super(Bucket, self)
sup.__init__(name, self.type, "Properties", self.props, **kwargs)
if 'AccessControl' in kwargs:
if kwargs['AccessControl'] not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
|
Support for creating S3 buckets.
|
Support for creating S3 buckets.
|
Python
|
bsd-2-clause
|
samcrang/troposphere,alonsodomin/troposphere,johnctitus/troposphere,WeAreCloudar/troposphere,mhahn/troposphere,wangqiang8511/troposphere,ikben/troposphere,7digital/troposphere,LouTheBrew/troposphere,mannytoledo/troposphere,cloudtools/troposphere,DualSpark/troposphere,Yipit/troposphere,inetCatapult/troposphere,dmm92/troposphere,jdc0589/troposphere,ccortezb/troposphere,alonsodomin/troposphere,horacio3/troposphere,Hons/troposphere,amosshapira/troposphere,garnaat/troposphere,pas256/troposphere,kid/troposphere,nicolaka/troposphere,yxd-hde/troposphere,jantman/troposphere,unravelin/troposphere,xxxVxxx/troposphere,ptoraskar/troposphere,ikben/troposphere,pas256/troposphere,cryptickp/troposphere,cloudtools/troposphere,dmm92/troposphere,horacio3/troposphere,craigbruce/troposphere,7digital/troposphere,micahhausler/troposphere,johnctitus/troposphere,iblazevic/troposphere
|
Support for creating S3 buckets.
|
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
}
class Bucket(AWSObject):
props = {
'AccessControl': (basestring, False),
'Tags': (Tags, False),
'WebsiteConfiguration': (WebsiteConfiguration, False)
}
access_control_types = [
'Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl',
]
def __init__(self, name, **kwargs):
self.type = "AWS::S3::Bucket"
sup = super(Bucket, self)
sup.__init__(name, self.type, "Properties", self.props, **kwargs)
if 'AccessControl' in kwargs:
if kwargs['AccessControl'] not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
|
<commit_before><commit_msg>Support for creating S3 buckets.<commit_after>
|
# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
}
class Bucket(AWSObject):
props = {
'AccessControl': (basestring, False),
'Tags': (Tags, False),
'WebsiteConfiguration': (WebsiteConfiguration, False)
}
access_control_types = [
'Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl',
]
def __init__(self, name, **kwargs):
self.type = "AWS::S3::Bucket"
sup = super(Bucket, self)
sup.__init__(name, self.type, "Properties", self.props, **kwargs)
if 'AccessControl' in kwargs:
if kwargs['AccessControl'] not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
|
Support for creating S3 buckets.# Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
}
class Bucket(AWSObject):
props = {
'AccessControl': (basestring, False),
'Tags': (Tags, False),
'WebsiteConfiguration': (WebsiteConfiguration, False)
}
access_control_types = [
'Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl',
]
def __init__(self, name, **kwargs):
self.type = "AWS::S3::Bucket"
sup = super(Bucket, self)
sup.__init__(name, self.type, "Properties", self.props, **kwargs)
if 'AccessControl' in kwargs:
if kwargs['AccessControl'] not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
|
<commit_before><commit_msg>Support for creating S3 buckets.<commit_after># Copyright (c) 2013, Bob Van Zant <bob@veznat.com>
# All rights reserved.
#
# See LICENSE file for full license.
from . import AWSObject, AWSProperty, Tags
class WebsiteConfiguration(AWSProperty):
props = {
'IndexDocument': (basestring, False),
'ErrorDocument': (basestring, False),
}
class Bucket(AWSObject):
props = {
'AccessControl': (basestring, False),
'Tags': (Tags, False),
'WebsiteConfiguration': (WebsiteConfiguration, False)
}
access_control_types = [
'Private',
'PublicRead',
'PublicReadWrite',
'AuthenticatedRead',
'BucketOwnerRead',
'BucketOwnerFullControl',
]
def __init__(self, name, **kwargs):
self.type = "AWS::S3::Bucket"
sup = super(Bucket, self)
sup.__init__(name, self.type, "Properties", self.props, **kwargs)
if 'AccessControl' in kwargs:
if kwargs['AccessControl'] not in self.access_control_types:
raise ValueError('AccessControl must be one of "%s"' % (
', '.join(self.access_control_types)))
|
|
5b13240d65f70f6418ca92b7cff81bc290e26fef
|
demo/ipython_notebook_config.py
|
demo/ipython_notebook_config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Configure the IPython notebook user settings
|
Configure the IPython notebook user settings
|
Python
|
bsd-3-clause
|
Zsailer/docker-jupyter-teaching,odewahn/docker-demo-images,danielballan/docker-demo-images,willjharmer/docker-demo-images,tanyaschlusser/docker-demo-images,pelucid/docker-demo-images,Zsailer/docker-demo-images,rgbkrk/docker-demo-images,vanceb/docker-demo-images,parente/docker-demo-images,philipz/docker-demo-images,philipz/docker-demo-images,modulexcite/docker-demo-images,parente/docker-demo-images,dietmarw/jupyter-docker-images,iamjakob/docker-demo-images,dietmarw/jupyter-docker-images,odewahn/docker-demo-images,CognitiveScale/docker-demo-images,rgbkrk/docker-demo-images,tanyaschlusser/docker-demo-images,rgbkrk/docker-demo-images,philipz/docker-demo-images,vanceb/docker-demo-images,Zsailer/docker-demo-images,tanyaschlusser/docker-demo-images,mjbright/docker-demo-images,Zsailer/docker-jupyter-teaching,jupyter/docker-demo-images,danielballan/docker-demo-images,ericdill/docker-demo-images,Zsailer/docker-jupyter-teaching,modulexcite/docker-demo-images,parente/docker-demo-images,pelucid/docker-demo-images,Zsailer/docker-jupyter-teaching,dietmarw/jupyter-docker-images,jupyter/docker-demo-images,danielballan/docker-demo-images,odewahn/docker-demo-images,vanceb/docker-demo-images,willjharmer/docker-demo-images,jupyter/docker-demo-images,ericdill/docker-demo-images,CognitiveScale/docker-demo-images,mjbright/docker-demo-images,modulexcite/docker-demo-images,ericdill/docker-demo-images,CognitiveScale/docker-demo-images,Zsailer/docker-demo-images,CognitiveScale/docker-demo-images,iamjakob/docker-demo-images,pelucid/docker-demo-images,willjharmer/docker-demo-images,mjbright/docker-demo-images,iamjakob/docker-demo-images
|
Configure the IPython notebook user settings
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Configure the IPython notebook user settings<commit_after>
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
Configure the IPython notebook user settings#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
<commit_before><commit_msg>Configure the IPython notebook user settings<commit_after>#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Configuration file for ipython-notebook.
c = get_config()
c.NotebookApp.ip = '*'
c.NotebookApp.open_browser = False
c.NotebookApp.port = 8888
# Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-
# For headers sent by the upstream reverse proxy. Necessary if the proxy handles
# SSL
c.NotebookApp.trust_xheaders = True
# Supply overrides for the tornado.web.Application that the IPython notebook
# uses.
c.NotebookApp.tornado_settings = {
'template_path':['/srv/ga/', '/srv/ipython/IPython/html',
'/srv/ipython/IPython/html/templates']
}
|
|
b29fa5c96fa5e0fdd2117164baace9ac8492867d
|
testmodel/webapp/selenium/test/log-in-test.py
|
testmodel/webapp/selenium/test/log-in-test.py
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.addCleanup(self.browser.quit)
self.browser.get('http://localhost:8080/intermine-demo/begin.do')
def testLogin(self):
login_link = self.browser.find_element_by_link_text('Log in')
self.assertIsNotNone(login_link)
login_link.click()
username = self.browser.find_element_by_name('username')
self.assertIsNotNone(username)
username.send_keys('intermine-test-user')
password = self.browser.find_element_by_name('password')
self.assertIsNotNone(password)
password.send_keys('intermine-test-user-password')
submit = self.browser.find_element_by_name('action')
submit.click()
logged_in_as = self.browser.find_element_by_css_selector('#loginbar li:nth-child(2)')
self.assertEqual('intermine-test-user', logged_in_as.text)
|
Test ability to log in
|
Test ability to log in
|
Python
|
lgpl-2.1
|
tomck/intermine,elsiklab/intermine,Arabidopsis-Information-Portal/intermine,Arabidopsis-Information-Portal/intermine,joshkh/intermine,joshkh/intermine,justincc/intermine,joshkh/intermine,elsiklab/intermine,zebrafishmine/intermine,elsiklab/intermine,kimrutherford/intermine,elsiklab/intermine,Arabidopsis-Information-Portal/intermine,joshkh/intermine,elsiklab/intermine,tomck/intermine,kimrutherford/intermine,zebrafishmine/intermine,tomck/intermine,justincc/intermine,joshkh/intermine,kimrutherford/intermine,JoeCarlson/intermine,kimrutherford/intermine,Arabidopsis-Information-Portal/intermine,tomck/intermine,zebrafishmine/intermine,JoeCarlson/intermine,joshkh/intermine,zebrafishmine/intermine,elsiklab/intermine,zebrafishmine/intermine,justincc/intermine,joshkh/intermine,Arabidopsis-Information-Portal/intermine,kimrutherford/intermine,kimrutherford/intermine,JoeCarlson/intermine,elsiklab/intermine,zebrafishmine/intermine,JoeCarlson/intermine,Arabidopsis-Information-Portal/intermine,tomck/intermine,justincc/intermine,JoeCarlson/intermine,zebrafishmine/intermine,tomck/intermine,Arabidopsis-Information-Portal/intermine,elsiklab/intermine,elsiklab/intermine,joshkh/intermine,tomck/intermine,zebrafishmine/intermine,justincc/intermine,kimrutherford/intermine,justincc/intermine,justincc/intermine,Arabidopsis-Information-Portal/intermine,justincc/intermine,tomck/intermine,justincc/intermine,JoeCarlson/intermine,JoeCarlson/intermine,joshkh/intermine,Arabidopsis-Information-Portal/intermine,tomck/intermine,kimrutherford/intermine,JoeCarlson/intermine,kimrutherford/intermine,JoeCarlson/intermine,zebrafishmine/intermine
|
Test ability to log in
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.addCleanup(self.browser.quit)
self.browser.get('http://localhost:8080/intermine-demo/begin.do')
def testLogin(self):
login_link = self.browser.find_element_by_link_text('Log in')
self.assertIsNotNone(login_link)
login_link.click()
username = self.browser.find_element_by_name('username')
self.assertIsNotNone(username)
username.send_keys('intermine-test-user')
password = self.browser.find_element_by_name('password')
self.assertIsNotNone(password)
password.send_keys('intermine-test-user-password')
submit = self.browser.find_element_by_name('action')
submit.click()
logged_in_as = self.browser.find_element_by_css_selector('#loginbar li:nth-child(2)')
self.assertEqual('intermine-test-user', logged_in_as.text)
|
<commit_before><commit_msg>Test ability to log in<commit_after>
|
import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.addCleanup(self.browser.quit)
self.browser.get('http://localhost:8080/intermine-demo/begin.do')
def testLogin(self):
login_link = self.browser.find_element_by_link_text('Log in')
self.assertIsNotNone(login_link)
login_link.click()
username = self.browser.find_element_by_name('username')
self.assertIsNotNone(username)
username.send_keys('intermine-test-user')
password = self.browser.find_element_by_name('password')
self.assertIsNotNone(password)
password.send_keys('intermine-test-user-password')
submit = self.browser.find_element_by_name('action')
submit.click()
logged_in_as = self.browser.find_element_by_css_selector('#loginbar li:nth-child(2)')
self.assertEqual('intermine-test-user', logged_in_as.text)
|
Test ability to log inimport unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.addCleanup(self.browser.quit)
self.browser.get('http://localhost:8080/intermine-demo/begin.do')
def testLogin(self):
login_link = self.browser.find_element_by_link_text('Log in')
self.assertIsNotNone(login_link)
login_link.click()
username = self.browser.find_element_by_name('username')
self.assertIsNotNone(username)
username.send_keys('intermine-test-user')
password = self.browser.find_element_by_name('password')
self.assertIsNotNone(password)
password.send_keys('intermine-test-user-password')
submit = self.browser.find_element_by_name('action')
submit.click()
logged_in_as = self.browser.find_element_by_css_selector('#loginbar li:nth-child(2)')
self.assertEqual('intermine-test-user', logged_in_as.text)
|
<commit_before><commit_msg>Test ability to log in<commit_after>import unittest
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
class LoginTestCase(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.addCleanup(self.browser.quit)
self.browser.get('http://localhost:8080/intermine-demo/begin.do')
def testLogin(self):
login_link = self.browser.find_element_by_link_text('Log in')
self.assertIsNotNone(login_link)
login_link.click()
username = self.browser.find_element_by_name('username')
self.assertIsNotNone(username)
username.send_keys('intermine-test-user')
password = self.browser.find_element_by_name('password')
self.assertIsNotNone(password)
password.send_keys('intermine-test-user-password')
submit = self.browser.find_element_by_name('action')
submit.click()
logged_in_as = self.browser.find_element_by_css_selector('#loginbar li:nth-child(2)')
self.assertEqual('intermine-test-user', logged_in_as.text)
|
|
13f06e971967106fba98aace57b7ffa0b07201eb
|
comrade/functional.py
|
comrade/functional.py
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
Add a @lazy decorator for simple lazy methods.
|
Add a @lazy decorator for simple lazy methods.
|
Python
|
mit
|
bueda/django-comrade
|
Add a @lazy decorator for simple lazy methods.
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
<commit_before><commit_msg>Add a @lazy decorator for simple lazy methods.<commit_after>
|
def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
Add a @lazy decorator for simple lazy methods.def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
<commit_before><commit_msg>Add a @lazy decorator for simple lazy methods.<commit_after>def lazy(func):
def lazy_func(self, *args, **kwargs):
cached_attribute = '_cached_%s' % func.__name__
if not hasattr(self, cached_attribute):
setattr(self, cached_attribute, func(self, *args, **kwargs))
return getattr(self, cached_attribute)
return lazy_func
|
|
23a4e55ba85dde4447d7768e8e4ffd7f1efcc385
|
cme/modules/laps.py
|
cme/modules/laps.py
|
from impacket.ldap import ldapasn1 as ldapasn1_impacket
class CMEModule:
'''
Module by technobro refactored by @mpgn (now compatible with LDAP protocol + filter by computer)
Initial module:
@T3KX: https://github.com/T3KX/Crackmapexec-LAPS
Credit: @n00py1
Reference: https://www.n00py.io/2020/12/dumping-laps-passwords-from-linux/
https://github.com/n00py/LAPSDumper
'''
name = 'laps'
description = 'Retrieves the LAPS passwords'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
"""
COMPUTER Computer name or wildcard ex: WIN-S10, WIN-* etc. Default: *
"""
self.computer = "*"
if 'COMPUTER' in module_options:
self.computer = module_options['COMPUTER']
def on_login(self, context, connection):
context.log.info('Getting LAPS Passwords')
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name='+ self.computer +'))'
attributes = ['ms-MCS-AdmPwd','samAccountname']
result = connection.search(searchFilter, attributes, 10000)
for item in result:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
msMCSAdmPwd = ''
sAMAccountName = ''
for computer in item['attributes']:
if str(computer['type']) == "sAMAccountName":
sAMAccountName = str(computer['vals'][0])
else:
msMCSAdmPwd = str(computer['vals'][0])
context.log.highlight("Computer: {:<20} Password: {}".format(sAMAccountName, msMCSAdmPwd))
|
Add LAPS module thx to @T3KX
|
Add LAPS module thx to @T3KX
|
Python
|
bsd-2-clause
|
byt3bl33d3r/CrackMapExec
|
Add LAPS module thx to @T3KX
|
from impacket.ldap import ldapasn1 as ldapasn1_impacket
class CMEModule:
'''
Module by technobro refactored by @mpgn (now compatible with LDAP protocol + filter by computer)
Initial module:
@T3KX: https://github.com/T3KX/Crackmapexec-LAPS
Credit: @n00py1
Reference: https://www.n00py.io/2020/12/dumping-laps-passwords-from-linux/
https://github.com/n00py/LAPSDumper
'''
name = 'laps'
description = 'Retrieves the LAPS passwords'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
"""
COMPUTER Computer name or wildcard ex: WIN-S10, WIN-* etc. Default: *
"""
self.computer = "*"
if 'COMPUTER' in module_options:
self.computer = module_options['COMPUTER']
def on_login(self, context, connection):
context.log.info('Getting LAPS Passwords')
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name='+ self.computer +'))'
attributes = ['ms-MCS-AdmPwd','samAccountname']
result = connection.search(searchFilter, attributes, 10000)
for item in result:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
msMCSAdmPwd = ''
sAMAccountName = ''
for computer in item['attributes']:
if str(computer['type']) == "sAMAccountName":
sAMAccountName = str(computer['vals'][0])
else:
msMCSAdmPwd = str(computer['vals'][0])
context.log.highlight("Computer: {:<20} Password: {}".format(sAMAccountName, msMCSAdmPwd))
|
<commit_before><commit_msg>Add LAPS module thx to @T3KX<commit_after>
|
from impacket.ldap import ldapasn1 as ldapasn1_impacket
class CMEModule:
'''
Module by technobro refactored by @mpgn (now compatible with LDAP protocol + filter by computer)
Initial module:
@T3KX: https://github.com/T3KX/Crackmapexec-LAPS
Credit: @n00py1
Reference: https://www.n00py.io/2020/12/dumping-laps-passwords-from-linux/
https://github.com/n00py/LAPSDumper
'''
name = 'laps'
description = 'Retrieves the LAPS passwords'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
"""
COMPUTER Computer name or wildcard ex: WIN-S10, WIN-* etc. Default: *
"""
self.computer = "*"
if 'COMPUTER' in module_options:
self.computer = module_options['COMPUTER']
def on_login(self, context, connection):
context.log.info('Getting LAPS Passwords')
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name='+ self.computer +'))'
attributes = ['ms-MCS-AdmPwd','samAccountname']
result = connection.search(searchFilter, attributes, 10000)
for item in result:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
msMCSAdmPwd = ''
sAMAccountName = ''
for computer in item['attributes']:
if str(computer['type']) == "sAMAccountName":
sAMAccountName = str(computer['vals'][0])
else:
msMCSAdmPwd = str(computer['vals'][0])
context.log.highlight("Computer: {:<20} Password: {}".format(sAMAccountName, msMCSAdmPwd))
|
Add LAPS module thx to @T3KXfrom impacket.ldap import ldapasn1 as ldapasn1_impacket
class CMEModule:
'''
Module by technobro refactored by @mpgn (now compatible with LDAP protocol + filter by computer)
Initial module:
@T3KX: https://github.com/T3KX/Crackmapexec-LAPS
Credit: @n00py1
Reference: https://www.n00py.io/2020/12/dumping-laps-passwords-from-linux/
https://github.com/n00py/LAPSDumper
'''
name = 'laps'
description = 'Retrieves the LAPS passwords'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
"""
COMPUTER Computer name or wildcard ex: WIN-S10, WIN-* etc. Default: *
"""
self.computer = "*"
if 'COMPUTER' in module_options:
self.computer = module_options['COMPUTER']
def on_login(self, context, connection):
context.log.info('Getting LAPS Passwords')
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name='+ self.computer +'))'
attributes = ['ms-MCS-AdmPwd','samAccountname']
result = connection.search(searchFilter, attributes, 10000)
for item in result:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
msMCSAdmPwd = ''
sAMAccountName = ''
for computer in item['attributes']:
if str(computer['type']) == "sAMAccountName":
sAMAccountName = str(computer['vals'][0])
else:
msMCSAdmPwd = str(computer['vals'][0])
context.log.highlight("Computer: {:<20} Password: {}".format(sAMAccountName, msMCSAdmPwd))
|
<commit_before><commit_msg>Add LAPS module thx to @T3KX<commit_after>from impacket.ldap import ldapasn1 as ldapasn1_impacket
class CMEModule:
'''
Module by technobro refactored by @mpgn (now compatible with LDAP protocol + filter by computer)
Initial module:
@T3KX: https://github.com/T3KX/Crackmapexec-LAPS
Credit: @n00py1
Reference: https://www.n00py.io/2020/12/dumping-laps-passwords-from-linux/
https://github.com/n00py/LAPSDumper
'''
name = 'laps'
description = 'Retrieves the LAPS passwords'
supported_protocols = ['ldap']
opsec_safe = True
multiple_hosts = False
def options(self, context, module_options):
"""
COMPUTER Computer name or wildcard ex: WIN-S10, WIN-* etc. Default: *
"""
self.computer = "*"
if 'COMPUTER' in module_options:
self.computer = module_options['COMPUTER']
def on_login(self, context, connection):
context.log.info('Getting LAPS Passwords')
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name='+ self.computer +'))'
attributes = ['ms-MCS-AdmPwd','samAccountname']
result = connection.search(searchFilter, attributes, 10000)
for item in result:
if isinstance(item, ldapasn1_impacket.SearchResultEntry) is not True:
continue
msMCSAdmPwd = ''
sAMAccountName = ''
for computer in item['attributes']:
if str(computer['type']) == "sAMAccountName":
sAMAccountName = str(computer['vals'][0])
else:
msMCSAdmPwd = str(computer['vals'][0])
context.log.highlight("Computer: {:<20} Password: {}".format(sAMAccountName, msMCSAdmPwd))
|
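For reference, a small sketch of what the search filter in this record expands to for a concrete COMPUTER option (the computer name is illustrative only, not taken from the commit):

computer = "WIN-S10"
searchFilter = '(&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name=' + computer + '))'
print(searchFilter)  # (&(objectCategory=computer)(ms-MCS-AdmPwd=*)(name=WIN-S10))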
|
3e0e9702becc5c7a8455b12113eb2341c27a34bd
|
CDJSVis/filtering/filters/tests/test_charge.py
|
CDJSVis/filtering/filters/tests/test_charge.py
|
"""
Unit tests for the charge filter
"""
import unittest
import numpy as np
from ....state import lattice
from .. import chargeFilter
from .. import base
################################################################################
class TestChargeFilter(unittest.TestCase):
"""
Test charge filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], -3.0)
self.lattice.addAtom("Au", [1,0,0], -2.0)
self.lattice.addAtom("Au", [0,1,0], 1.0)
self.lattice.addAtom("Au", [0,0,1], -4.0)
self.lattice.addAtom("Au", [1,1,0], 4.0)
self.lattice.addAtom("Au", [0,1,1], 3.0)
self.lattice.addAtom("Au", [1,1,1], 1.0)
self.lattice.addAtom("Au", [2,0,0], -1.0)
self.lattice.addAtom("Au", [0,2,0], -2.0)
self.lattice.addAtom("Au", [0,0,2], 4.0)
# filter
self.filter = chargeFilter.ChargeFilter("Charge")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_chargeFilter(self):
"""
Charge filter
"""
# settings
settings = chargeFilter.ChargeFilterSettings()
settings.updateSetting("minCharge", -3.5)
settings.updateSetting("maxCharge", 0)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check positions are correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [2,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
Add a test for the charge filter.
|
Add a test for the charge filter.
|
Python
|
mit
|
chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman,chrisdjscott/Atoman
|
Add a test for the charge filter.
|
"""
Unit tests for the charge filter
"""
import unittest
import numpy as np
from ....state import lattice
from .. import chargeFilter
from .. import base
################################################################################
class TestChargeFilter(unittest.TestCase):
"""
Test charge filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], -3.0)
self.lattice.addAtom("Au", [1,0,0], -2.0)
self.lattice.addAtom("Au", [0,1,0], 1.0)
self.lattice.addAtom("Au", [0,0,1], -4.0)
self.lattice.addAtom("Au", [1,1,0], 4.0)
self.lattice.addAtom("Au", [0,1,1], 3.0)
self.lattice.addAtom("Au", [1,1,1], 1.0)
self.lattice.addAtom("Au", [2,0,0], -1.0)
self.lattice.addAtom("Au", [0,2,0], -2.0)
self.lattice.addAtom("Au", [0,0,2], 4.0)
# filter
self.filter = chargeFilter.ChargeFilter("Charge")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_chargeFilter(self):
"""
Charge filter
"""
# settings
settings = chargeFilter.ChargeFilterSettings()
settings.updateSetting("minCharge", -3.5)
settings.updateSetting("maxCharge", 0)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check positions are correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [2,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
<commit_before><commit_msg>Add a test for the charge filter.<commit_after>
|
"""
Unit tests for the charge filter
"""
import unittest
import numpy as np
from ....state import lattice
from .. import chargeFilter
from .. import base
################################################################################
class TestChargeFilter(unittest.TestCase):
"""
Test charge filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], -3.0)
self.lattice.addAtom("Au", [1,0,0], -2.0)
self.lattice.addAtom("Au", [0,1,0], 1.0)
self.lattice.addAtom("Au", [0,0,1], -4.0)
self.lattice.addAtom("Au", [1,1,0], 4.0)
self.lattice.addAtom("Au", [0,1,1], 3.0)
self.lattice.addAtom("Au", [1,1,1], 1.0)
self.lattice.addAtom("Au", [2,0,0], -1.0)
self.lattice.addAtom("Au", [0,2,0], -2.0)
self.lattice.addAtom("Au", [0,0,2], 4.0)
# filter
self.filter = chargeFilter.ChargeFilter("Charge")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_chargeFilter(self):
"""
Charge filter
"""
# settings
settings = chargeFilter.ChargeFilterSettings()
settings.updateSetting("minCharge", -3.5)
settings.updateSetting("maxCharge", 0)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check positions are correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [2,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
Add a test for the charge filter.
"""
Unit tests for the charge filter
"""
import unittest
import numpy as np
from ....state import lattice
from .. import chargeFilter
from .. import base
################################################################################
class TestChargeFilter(unittest.TestCase):
"""
Test charge filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], -3.0)
self.lattice.addAtom("Au", [1,0,0], -2.0)
self.lattice.addAtom("Au", [0,1,0], 1.0)
self.lattice.addAtom("Au", [0,0,1], -4.0)
self.lattice.addAtom("Au", [1,1,0], 4.0)
self.lattice.addAtom("Au", [0,1,1], 3.0)
self.lattice.addAtom("Au", [1,1,1], 1.0)
self.lattice.addAtom("Au", [2,0,0], -1.0)
self.lattice.addAtom("Au", [0,2,0], -2.0)
self.lattice.addAtom("Au", [0,0,2], 4.0)
# filter
self.filter = chargeFilter.ChargeFilter("Charge")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_chargeFilter(self):
"""
Charge filter
"""
# settings
settings = chargeFilter.ChargeFilterSettings()
settings.updateSetting("minCharge", -3.5)
settings.updateSetting("maxCharge", 0)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check positions are correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [2,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
<commit_before><commit_msg>Add a test for the charge filter.<commit_after>
"""
Unit tests for the charge filter
"""
import unittest
import numpy as np
from ....state import lattice
from .. import chargeFilter
from .. import base
################################################################################
class TestChargeFilter(unittest.TestCase):
"""
Test charge filter
"""
def setUp(self):
"""
Called before each test
"""
# generate lattice
self.lattice = lattice.Lattice()
self.lattice.addAtom("Au", [0,0,0], -3.0)
self.lattice.addAtom("Au", [1,0,0], -2.0)
self.lattice.addAtom("Au", [0,1,0], 1.0)
self.lattice.addAtom("Au", [0,0,1], -4.0)
self.lattice.addAtom("Au", [1,1,0], 4.0)
self.lattice.addAtom("Au", [0,1,1], 3.0)
self.lattice.addAtom("Au", [1,1,1], 1.0)
self.lattice.addAtom("Au", [2,0,0], -1.0)
self.lattice.addAtom("Au", [0,2,0], -2.0)
self.lattice.addAtom("Au", [0,0,2], 4.0)
# filter
self.filter = chargeFilter.ChargeFilter("Charge")
def tearDown(self):
"""
Called after each test
"""
# remove refs
self.lattice = None
self.filter = None
def test_chargeFilter(self):
"""
Charge filter
"""
# settings
settings = chargeFilter.ChargeFilterSettings()
settings.updateSetting("minCharge", -3.5)
settings.updateSetting("maxCharge", 0)
# set PBC
self.lattice.PBC[:] = 1
# filter input
filterInput = base.FilterInput()
filterInput.inputState = self.lattice
visibleAtoms = np.arange(self.lattice.NAtoms, dtype=np.int32)
filterInput.visibleAtoms = visibleAtoms
filterInput.NScalars = 0
filterInput.fullScalars = np.empty(0, np.float64)
filterInput.NVectors = 0
filterInput.fullVectors = np.empty(0, np.float64)
filterInput.ompNumThreads = 1
# call filter
result = self.filter.apply(filterInput, settings)
self.assertIsInstance(result, base.FilterResult)
# make sure num visible is correct
self.assertEqual(len(visibleAtoms), 4)
# check positions are correct
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[0])), [0,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[1])), [1,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[2])), [2,0,0])
self.assertListEqual(list(self.lattice.atomPos(visibleAtoms[3])), [0,2,0])
|
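A self-contained sanity check of the expected outcome of the test above, using plain Python over the same charges added in setUp (it does not touch the real filter; note the result is the same whether or not the bounds are treated as inclusive, since no charge equals -3.5 or 0 exactly):

charges = [-3.0, -2.0, 1.0, -4.0, 4.0, 3.0, 1.0, -1.0, -2.0, 4.0]
visible = [i for i, q in enumerate(charges) if -3.5 <= q <= 0]
print(visible)  # [0, 1, 7, 8] -> atoms at (0,0,0), (1,0,0), (2,0,0), (0,2,0)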
|
ded110e2357ea400a64d90822efd43213374cfd4
|
fedmsg.d/logging.py
|
fedmsg.d/logging.py
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
},
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
Add a comma here to help out people who modify this file.
|
Add a comma here to help out people who modify this file.
See https://bugzilla.redhat.com/show_bug.cgi?id=1184523
|
Python
|
lgpl-2.1
|
maxamillion/fedmsg,maxamillion/fedmsg,maxamillion/fedmsg,vivekanand1101/fedmsg,cicku/fedmsg,pombredanne/fedmsg,mathstuf/fedmsg,chaiku/fedmsg,vivekanand1101/fedmsg,pombredanne/fedmsg,cicku/fedmsg,cicku/fedmsg,chaiku/fedmsg,fedora-infra/fedmsg,chaiku/fedmsg,mathstuf/fedmsg,fedora-infra/fedmsg,fedora-infra/fedmsg,vivekanand1101/fedmsg,pombredanne/fedmsg,mathstuf/fedmsg
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
Add a comma here to help out people who modify this file.
See https://bugzilla.redhat.com/show_bug.cgi?id=1184523
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
},
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
<commit_before># Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
<commit_msg>Add a comma here to help out people who modify this file.
See https://bugzilla.redhat.com/show_bug.cgi?id=1184523<commit_after>
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
},
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
Add a comma here to help out people who modify this file.
See https://bugzilla.redhat.com/show_bug.cgi?id=1184523# Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
},
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
<commit_before># Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
}
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
<commit_msg>Add a comma here to help out people who modify this file.
See https://bugzilla.redhat.com/show_bug.cgi?id=1184523<commit_after># Setup fedmsg logging.
# See the following for constraints on this format https://bit.ly/Xn1WDn
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": bare_format
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "INFO",
"stream": "ext://sys.stdout",
},
),
loggers=dict(
fedmsg={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
moksha={
"level": "INFO",
"propagate": False,
"handlers": ["console"],
},
),
),
)
|
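The point of the trailing comma in the record above, as a sketch (the filelog handler and its settings are hypothetical, not from the commit): a second entry can now be pasted in without editing the existing console line.

handlers = dict(
    console={
        "class": "logging.StreamHandler",
        "formatter": "bare",
        "level": "INFO",
        "stream": "ext://sys.stdout",
    },
    filelog={
        "class": "logging.FileHandler",
        "formatter": "bare",
        "level": "INFO",
        "filename": "fedmsg.log",
    },
)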
8b45094e20c69f8a5c9aa9ca74de40763d27b8f2
|
test_parallel_vectorize_numpy_2.py
|
test_parallel_vectorize_numpy_2.py
|
'''
Test parallel-vectorize with numpy.fromfunc.
Uses the work load from test_parallel_vectorize.
This time we pass a function pointer.
'''
from test_parallel_vectorize import *
import numpy as np
def main():
module = Module.new(__name__)
exe = CExecutor(module)
workdef = Work_D_D()
workfunc = workdef(module)
# get pointer to workfunc
workfunc_ptr = exe.engine.get_pointer_to_function(workfunc)
workdecl = CDeclare(workfunc.name, workfunc.type.pointee, workfunc_ptr)
spufdef = SpecializedParallelUFunc(ParallelUFuncPosix(num_thread=2),
UFuncCore_D_D(),
workdecl)
sppufunc = spufdef(module)
sppufunc.verify()
print(sppufunc)
module.verify()
mpm = PassManager.new()
pmbuilder = PassManagerBuilder.new()
pmbuilder.opt_level = 3
pmbuilder.populate(mpm)
mpm.run(module)
print(module)
# run
funcptr = exe.engine.get_pointer_to_function(sppufunc)
print("Function pointer: %x" % funcptr)
ptr_t = long # py2 only
# Be careful that fromfunc does not provide full error checking yet.
# If typenum is out of bounds, we get nasty memory corruption.
# For instance, -1 for typenum will cause a segfault.
# If elements of the type list (2nd arg) are tuples instead,
# there will also be memory corruption. (Seems like code rewrite.)
typenum = np.dtype(np.double).num
ufunc = np.fromfunc([ptr_t(funcptr)], [[typenum, typenum]], 1, 1, [None])
x = np.linspace(0., 10., 1000)
x.dtype=np.double
# print x
ans = ufunc(x)
# print ans
if not ( ans == x/2.345 ).all():
raise ValueError('Computation failed')
else:
print('Good')
if __name__ == '__main__':
main()
|
Add test that uses function pointer.
|
Add test that uses function pointer.
|
Python
|
bsd-3-clause
|
llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy,llvmpy/llvmpy
|
Add test that uses function pointer.
|
'''
Test parallel-vectorize with numpy.fromfunc.
Uses the work load from test_parallel_vectorize.
This time we pass a function pointer.
'''
from test_parallel_vectorize import *
import numpy as np
def main():
module = Module.new(__name__)
exe = CExecutor(module)
workdef = Work_D_D()
workfunc = workdef(module)
# get pointer to workfunc
workfunc_ptr = exe.engine.get_pointer_to_function(workfunc)
workdecl = CDeclare(workfunc.name, workfunc.type.pointee, workfunc_ptr)
spufdef = SpecializedParallelUFunc(ParallelUFuncPosix(num_thread=2),
UFuncCore_D_D(),
workdecl)
sppufunc = spufdef(module)
sppufunc.verify()
print(sppufunc)
module.verify()
mpm = PassManager.new()
pmbuilder = PassManagerBuilder.new()
pmbuilder.opt_level = 3
pmbuilder.populate(mpm)
mpm.run(module)
print(module)
# run
funcptr = exe.engine.get_pointer_to_function(sppufunc)
print("Function pointer: %x" % funcptr)
ptr_t = long # py2 only
# Be careful that fromfunc does not provide full error checking yet.
# If typenum is out of bounds, we get nasty memory corruption.
# For instance, -1 for typenum will cause a segfault.
# If elements of the type list (2nd arg) are tuples instead,
# there will also be memory corruption. (Seems like code rewrite.)
typenum = np.dtype(np.double).num
ufunc = np.fromfunc([ptr_t(funcptr)], [[typenum, typenum]], 1, 1, [None])
x = np.linspace(0., 10., 1000)
x.dtype=np.double
# print x
ans = ufunc(x)
# print ans
if not ( ans == x/2.345 ).all():
raise ValueError('Computation failed')
else:
print('Good')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test that uses function pointer.<commit_after>
|
'''
Test parallel-vectorize with numpy.fromfunc.
Uses the work load from test_parallel_vectorize.
This time we pass a function pointer.
'''
from test_parallel_vectorize import *
import numpy as np
def main():
module = Module.new(__name__)
exe = CExecutor(module)
workdef = Work_D_D()
workfunc = workdef(module)
# get pointer to workfunc
workfunc_ptr = exe.engine.get_pointer_to_function(workfunc)
workdecl = CDeclare(workfunc.name, workfunc.type.pointee, workfunc_ptr)
spufdef = SpecializedParallelUFunc(ParallelUFuncPosix(num_thread=2),
UFuncCore_D_D(),
workdecl)
sppufunc = spufdef(module)
sppufunc.verify()
print(sppufunc)
module.verify()
mpm = PassManager.new()
pmbuilder = PassManagerBuilder.new()
pmbuilder.opt_level = 3
pmbuilder.populate(mpm)
mpm.run(module)
print(module)
# run
funcptr = exe.engine.get_pointer_to_function(sppufunc)
print("Function pointer: %x" % funcptr)
ptr_t = long # py2 only
# Be careful that fromfunc does not provide full error checking yet.
# If typenum is out of bounds, we get nasty memory corruption.
# For instance, -1 for typenum will cause a segfault.
# If elements of the type list (2nd arg) are tuples instead,
# there will also be memory corruption. (Seems like code rewrite.)
typenum = np.dtype(np.double).num
ufunc = np.fromfunc([ptr_t(funcptr)], [[typenum, typenum]], 1, 1, [None])
x = np.linspace(0., 10., 1000)
x.dtype=np.double
# print x
ans = ufunc(x)
# print ans
if not ( ans == x/2.345 ).all():
raise ValueError('Computation failed')
else:
print('Good')
if __name__ == '__main__':
main()
|
Add test that uses function pointer.'''
Test parallel-vectorize with numpy.fromfunc.
Uses the work load from test_parallel_vectorize.
This time we pass a function pointer.
'''
from test_parallel_vectorize import *
import numpy as np
def main():
module = Module.new(__name__)
exe = CExecutor(module)
workdef = Work_D_D()
workfunc = workdef(module)
# get pointer to workfunc
workfunc_ptr = exe.engine.get_pointer_to_function(workfunc)
workdecl = CDeclare(workfunc.name, workfunc.type.pointee, workfunc_ptr)
spufdef = SpecializedParallelUFunc(ParallelUFuncPosix(num_thread=2),
UFuncCore_D_D(),
workdecl)
sppufunc = spufdef(module)
sppufunc.verify()
print(sppufunc)
module.verify()
mpm = PassManager.new()
pmbuilder = PassManagerBuilder.new()
pmbuilder.opt_level = 3
pmbuilder.populate(mpm)
mpm.run(module)
print(module)
# run
funcptr = exe.engine.get_pointer_to_function(sppufunc)
print("Function pointer: %x" % funcptr)
ptr_t = long # py2 only
# Be careful that fromfunc does not provide full error checking yet.
# If typenum is out of bounds, we get nasty memory corruption.
# For instance, -1 for typenum will cause a segfault.
# If elements of the type list (2nd arg) are tuples instead,
# there will also be memory corruption. (Seems like code rewrite.)
typenum = np.dtype(np.double).num
ufunc = np.fromfunc([ptr_t(funcptr)], [[typenum, typenum]], 1, 1, [None])
x = np.linspace(0., 10., 1000)
x.dtype=np.double
# print x
ans = ufunc(x)
# print ans
if not ( ans == x/2.345 ).all():
raise ValueError('Computation failed')
else:
print('Good')
if __name__ == '__main__':
main()
|
<commit_before><commit_msg>Add test that uses function pointer.<commit_after>'''
Test parallel-vectorize with numpy.fromfunc.
Uses the work load from test_parallel_vectorize.
This time we pass a function pointer.
'''
from test_parallel_vectorize import *
import numpy as np
def main():
module = Module.new(__name__)
exe = CExecutor(module)
workdef = Work_D_D()
workfunc = workdef(module)
# get pointer to workfunc
workfunc_ptr = exe.engine.get_pointer_to_function(workfunc)
workdecl = CDeclare(workfunc.name, workfunc.type.pointee, workfunc_ptr)
spufdef = SpecializedParallelUFunc(ParallelUFuncPosix(num_thread=2),
UFuncCore_D_D(),
workdecl)
sppufunc = spufdef(module)
sppufunc.verify()
print(sppufunc)
module.verify()
mpm = PassManager.new()
pmbuilder = PassManagerBuilder.new()
pmbuilder.opt_level = 3
pmbuilder.populate(mpm)
mpm.run(module)
print(module)
# run
funcptr = exe.engine.get_pointer_to_function(sppufunc)
print("Function pointer: %x" % funcptr)
ptr_t = long # py2 only
# Be careful that fromfunc does not provide full error checking yet.
# If typenum is out of bounds, we get nasty memory corruption.
# For instance, -1 for typenum will cause a segfault.
# If elements of the type list (2nd arg) are tuples instead,
# there will also be memory corruption. (Seems like code rewrite.)
typenum = np.dtype(np.double).num
ufunc = np.fromfunc([ptr_t(funcptr)], [[typenum, typenum]], 1, 1, [None])
x = np.linspace(0., 10., 1000)
x.dtype=np.double
# print x
ans = ufunc(x)
# print ans
if not ( ans == x/2.345 ).all():
raise ValueError('Computation failed')
else:
print('Good')
if __name__ == '__main__':
main()
|
|
3bc56faeb1d12082b2ceee147c37ae42bd7713ca
|
tests/test_serialization_format.py
|
tests/test_serialization_format.py
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import dimod
from dimod.serialization.format import sampleset_to_string
class Test_sampleset_to_string(unittest.TestCase):
def test_smoke(self):
# test that nothing falls down or explodes, most 'tests' would be in
# the doctests
samples = dimod.ExactSolver().sample_ising({v: -v - 1 for v in range(5)}, {})
str(samples)
|
Add one smoketest to str(sampleset)
|
Add one smoketest to str(sampleset)
|
Python
|
apache-2.0
|
oneklc/dimod,oneklc/dimod
|
Add one smoketest to str(sampleset)
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import dimod
from dimod.serialization.format import sampleset_to_string
class Test_sampleset_to_string(unittest.TestCase):
def test_smoke(self):
# test that nothing falls down or explodes, most 'tests' would be in
# the doctests
samples = dimod.ExactSolver().sample_ising({v: -v - 1 for v in range(5)}, {})
str(samples)
|
<commit_before><commit_msg>Add one smoketest to str(sampleset)<commit_after>
|
# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import dimod
from dimod.serialization.format import sampleset_to_string
class Test_sampleset_to_string(unittest.TestCase):
def test_smoke(self):
# test that nothing falls down or explodes, most 'tests' would be in
# the doctests
samples = dimod.ExactSolver().sample_ising({v: -v - 1 for v in range(5)}, {})
str(samples)
|
Add one smoketest to str(sampleset)# Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import dimod
from dimod.serialization.format import sampleset_to_string
class Test_sampleset_to_string(unittest.TestCase):
def test_smoke(self):
# test that nothing falls down or explodes, most 'tests' would be in
# the doctests
samples = dimod.ExactSolver().sample_ising({v: -v - 1 for v in range(5)}, {})
str(samples)
|
<commit_before><commit_msg>Add one smoketest to str(sampleset)<commit_after># Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# =============================================================================
import unittest
import dimod
from dimod.serialization.format import sampleset_to_string
class Test_sampleset_to_string(unittest.TestCase):
def test_smoke(self):
# test that nothing falls down or explodes, most 'tests' would be in
# the doctests
samples = dimod.ExactSolver().sample_ising({v: -v - 1 for v in range(5)}, {})
str(samples)
|
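A hedged interactive illustration of what the smoke test above drives (it uses the same ExactSolver/sample_ising API as the test itself; the exact table layout printed by str() is not asserted here):

import dimod
sampleset = dimod.ExactSolver().sample_ising({0: -1, 1: 1}, {(0, 1): -0.5})
print(sampleset)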
|
cd1620bb1b617f8413eafcf93c3da3ac43bfa088
|
db_connection_pool.py
|
db_connection_pool.py
|
class ConnectionPool():
"""
Usage:
conn_pool = nmi_mysql.ConnectionPool(config)
db = conn_pool.get_connection()
db.query('SELECT 1', [])
conn_pool.return_connection(db)
conn_pool.close()
"""
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self.initialize_pool()
def initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(DB(self.conf, True))
def get_connection(self):
# returns a db instance when one is available else waits until one is
db = self.pool.get(True)
# checks if db is still connected because the db instance automatically closes when not in use
if not self.ping(db):
db.connect()
return db
def return_connection(self, db):
return self.pool.put_nowait(db)
def close(self):
while not self.is_empty():
self.pool.get().close()
def ping(self, db):
data = db.query('SELECT 1', [])
return data
def get_initialized_connection_pool(self):
return self.pool
def is_empty(self):
return self.pool.empty()
|
Add db connection pool example
|
Add db connection pool example
|
Python
|
mit
|
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
|
Add db connection pool example
|
class ConnectionPool():
"""
Usage:
conn_pool = nmi_mysql.ConnectionPool(config)
db = conn_pool.get_connection()
db.query('SELECT 1', [])
conn_pool.return_connection(db)
conn_pool.close()
"""
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self.initialize_pool()
def initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(DB(self.conf, True))
def get_connection(self):
# returns a db instance when one is available else waits until one is
db = self.pool.get(True)
# checks if db is still connected because the db instance automatically closes when not in use
if not self.ping(db):
db.connect()
return db
def return_connection(self, db):
return self.pool.put_nowait(db)
def close(self):
while not self.is_empty():
self.pool.get().close()
def ping(self, db):
data = db.query('SELECT 1', [])
return data
def get_initialized_connection_pool(self):
return self.pool
def is_empty(self):
return self.pool.empty()
|
<commit_before><commit_msg>Add db connection pool example<commit_after>
|
class ConnectionPool():
"""
Usage:
conn_pool = nmi_mysql.ConnectionPool(config)
db = conn_pool.get_connection()
db.query('SELECT 1', [])
conn_pool.return_connection(db)
conn_pool.close()
"""
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self.initialize_pool()
def initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(DB(self.conf, True))
def get_connection(self):
# returns a db instance when one is available else waits until one is
db = self.pool.get(True)
# checks if db is still connected because the db instance automatically closes when not in use
if not self.ping(db):
db.connect()
return db
def return_connection(self, db):
return self.pool.put_nowait(db)
def close(self):
while not self.is_empty():
self.pool.get().close()
def ping(self, db):
data = db.query('SELECT 1', [])
return data
def get_initialized_connection_pool(self):
return self.pool
def is_empty(self):
return self.pool.empty()
|
Add db connection pool exampleclass ConnectionPool():
"""
Usage:
conn_pool = nmi_mysql.ConnectionPool(config)
db = conn_pool.get_connection()
db.query('SELECT 1', [])
conn_pool.return_connection(db)
conn_pool.close()
"""
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self.initialize_pool()
def initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(DB(self.conf, True))
def get_connection(self):
# returns a db instance when one is available else waits until one is
db = self.pool.get(True)
# checks if db is still connected because db instance automatically closes when not in used
if not self.ping(db):
db.connect()
return db
def return_connection(self, db):
return self.pool.put_nowait(db)
def close(self):
while not self.is_empty():
self.pool.get().close()
def ping(self, db):
data = db.query('SELECT 1', [])
return data
def get_initialized_connection_pool(self):
return self.pool
def is_empty(self):
return self.pool.empty()
|
<commit_before><commit_msg>Add db connection pool example<commit_after>class ConnectionPool():
"""
Usage:
conn_pool = nmi_mysql.ConnectionPool(config)
db = conn_pool.get_connection()
db.query('SELECT 1', [])
conn_pool.return_connection(db)
conn_pool.close()
"""
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self.initialize_pool()
def initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(DB(self.conf, True))
def get_connection(self):
# returns a db instance when one is available else waits until one is
db = self.pool.get(True)
# checks if db is still connected because the db instance automatically closes when not in use
if not self.ping(db):
db.connect()
return db
def return_connection(self, db):
return self.pool.put_nowait(db)
def close(self):
while not self.is_empty():
self.pool.get().close()
def ping(self, db):
data = db.query('SELECT 1', [])
return data
def get_initialized_connection_pool(self):
return self.pool
def is_empty(self):
return self.pool.empty()
|
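Note that the class in this record references Queue and DB without importing them. A plausible header, hedged: the DB import path depends on the nmi_mysql package named in the docstring and is an assumption here, not something the commit states.

from queue import Queue   # Python 3 stdlib; on Python 2 this was `from Queue import Queue`
from nmi_mysql import DB  # assumption: the connection wrapper referred to in the docstring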
|
e64df5ab8307c6f6120735816166c6ff8ffeccfc
|
neural_network/neural_network.py
|
neural_network/neural_network.py
|
#!/usr/bin/env python
import math
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
def main():
np.random.seed(1)
# [4, 3]
features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# [4, 1]
labels = np.array([[0], [0], [1], [1]])
# weights1 = 2 * np.random.random((3,1)) - 1
weights1 = np.array([[1.0], [1.0], [1.0]])
epoch_number = 100
learning_rate = 0.01
for i in range(epoch_number):
input_layer = features
layer1 = sigmoid(np.dot(input_layer, weights1))
difference1 = labels - layer1
delta1 = -1.0 * difference1 * sigmoid_derivate(layer1)
grad = np.dot(input_layer.T, delta1)
weights1 -= learning_rate * grad
print("Current weights is: {}".format(weights1))
test_dataset = [[0, 0, 1]]
predict_propability = sigmoid(np.dot(test_dataset, weights1))
print("The predict propability is: {}".format(predict_propability))
if __name__ == "__main__":
main()
|
Implement backpropagation algorithm for neural network
|
Implement backpropagation algorithm for neural network
|
Python
|
mit
|
tobegit3hub/ml_implementation,erwin00776/copy_ml_implements
|
Implement backpropagation algorithm for neural network
|
#!/usr/bin/env python
import math
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
def main():
np.random.seed(1)
# [4, 3]
features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# [4, 1]
labels = np.array([[0], [0], [1], [1]])
# weights1 = 2 * np.random.random((3,1)) - 1
weights1 = np.array([[1.0], [1.0], [1.0]])
epoch_number = 100
learning_rate = 0.01
for i in range(epoch_number):
input_layer = features
layer1 = sigmoid(np.dot(input_layer, weights1))
difference1 = labels - layer1
delta1 = -1.0 * difference1 * sigmoid_derivate(layer1)
grad = np.dot(input_layer.T, delta1)
weights1 -= learning_rate * grad
print("Current weights is: {}".format(weights1))
test_dataset = [[0, 0, 1]]
predict_propability = sigmoid(np.dot(test_dataset, weights1))
print("The predict propability is: {}".format(predict_propability))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement backpropagation algorithm for neural network<commit_after>
|
#!/usr/bin/env python
import math
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
def main():
np.random.seed(1)
# [4, 3]
features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# [4, 1]
labels = np.array([[0], [0], [1], [1]])
# weights1 = 2 * np.random.random((3,1)) - 1
weights1 = np.array([[1.0], [1.0], [1.0]])
epoch_number = 100
learning_rate = 0.01
for i in range(epoch_number):
input_layer = features
layer1 = sigmoid(np.dot(input_layer, weights1))
difference1 = labels - layer1
delta1 = -1.0 * difference1 * sigmoid_derivate(layer1)
grad = np.dot(input_layer.T, delta1)
weights1 -= learning_rate * grad
print("Current weights is: {}".format(weights1))
test_dataset = [[0, 0, 1]]
predict_propability = sigmoid(np.dot(test_dataset, weights1))
print("The predict propability is: {}".format(predict_propability))
if __name__ == "__main__":
main()
|
Implement backpropagation algorithm for neural network#!/usr/bin/env python
import math
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
def main():
np.random.seed(1)
# [4, 3]
features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# [4, 1]
labels = np.array([[0], [0], [1], [1]])
# weights1 = 2 * np.random.random((3,1)) - 1
weights1 = np.array([[1.0], [1.0], [1.0]])
epoch_number = 100
learning_rate = 0.01
for i in range(epoch_number):
input_layer = features
layer1 = sigmoid(np.dot(input_layer, weights1))
difference1 = labels - layer1
delta1 = -1.0 * difference1 * sigmoid_derivate(layer1)
grad = np.dot(input_layer.T, delta1)
weights1 -= learning_rate * grad
print("Current weights is: {}".format(weights1))
test_dataset = [[0, 0, 1]]
predict_propability = sigmoid(np.dot(test_dataset, weights1))
print("The predict propability is: {}".format(predict_propability))
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Implement backpropagation algorithm for neural network<commit_after>#!/usr/bin/env python
import math
import numpy as np
def sigmoid(x):
return 1.0 / (1 + np.exp(-x))
def sigmoid_derivate(x):
return x * (1 - x)
def main():
np.random.seed(1)
# [4, 3]
features = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 1]])
# [4, 1]
labels = np.array([[0], [0], [1], [1]])
# weights1 = 2 * np.random.random((3,1)) - 1
weights1 = np.array([[1.0], [1.0], [1.0]])
epoch_number = 100
learning_rate = 0.01
for i in range(epoch_number):
input_layer = features
layer1 = sigmoid(np.dot(input_layer, weights1))
difference1 = labels - layer1
delta1 = -1.0 * difference1 * sigmoid_derivate(layer1)
grad = np.dot(input_layer.T, delta1)
weights1 -= learning_rate * grad
print("Current weights is: {}".format(weights1))
test_dataset = [[0, 0, 1]]
predict_propability = sigmoid(np.dot(test_dataset, weights1))
print("The predict propability is: {}".format(predict_propability))
if __name__ == "__main__":
main()
|
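The weight update in the training loop above is plain batch gradient descent. The commit does not state the loss, but the delta1 expression matches a squared-error loss through a sigmoid; a finite-difference check (illustrative sketch, same data as the script) confirms the analytic gradient:

import numpy as np

def loss(w, X, y):
    p = 1.0 / (1 + np.exp(-X.dot(w)))
    return 0.5 * np.sum((y - p) ** 2)

X = np.array([[0., 0., 1.], [0., 1., 1.], [1., 0., 1.], [1., 1., 1.]])
y = np.array([[0.], [0.], [1.], [1.]])
w = np.array([[1.0], [1.0], [1.0]])

p = 1.0 / (1 + np.exp(-X.dot(w)))
analytic = X.T.dot(-(y - p) * p * (1 - p))   # same form as grad in main()

eps = 1e-6
numeric = np.zeros_like(w)
for i in range(w.size):
    step = np.zeros_like(w)
    step[i] = eps
    numeric[i] = (loss(w + step, X, y) - loss(w - step, X, y)) / (2 * eps)

print(np.allclose(analytic, numeric))  # expected: True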
|
15ec7cf519ce98ddb6dee50de2a2b3c6ace44680
|
l10n_br_sale/tests/test_l10n_br_sale_product.py
|
l10n_br_sale/tests/test_l10n_br_sale_product.py
|
# @ 2019 Akretion - www.akretion.com.br -
# Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestL10nBRSaleProduct(common.TransactionCase):
def setUp(self):
super(TestL10nBRSaleProduct, self).setUp()
self.sale_object = self.env["sale.order"]
self.sale_stock = self.sale_object.browse(
self.ref("l10n_br_sale_product.l10n_br_sale_product_demo_1")
)
def test_l10n_br_sale_product(self):
"""Test fields implemented by l10n_br_sale_product"""
self.sale_stock.onchange_partner_id()
self.sale_stock.onchange_partner_shipping_id()
for line in self.sale_stock.order_line:
line.product_id_change()
line.onchange_fiscal()
self.assertTrue(
line.fiscal_position_id,
"Error to mapping Fiscal Position on Sale Order Line.",
)
self.assertEquals(
self.sale_stock.amount_total,
7473.3,
u"Error to apply discount on sale order.",
)
self.assertEquals(
self.sale_stock.amount_freight,
6.0,
u"Error to calculate Total Amount Freight.",
)
self.assertEquals(
self.sale_stock.amount_costs, 2.0, u"Error to calculate Total Amount Costs."
)
self.assertEquals(
self.sale_stock.amount_extra, 12.0, u"Error to calculate Total Amount Extra"
)
self.assertEquals(
self.sale_stock.amount_insurance,
4.0,
u"Error to calculate Total Amount Extra",
)
self.sale_stock.action_confirm()
# Create and check invoice
self.sale_stock.action_invoice_create(final=True)
for invoice in self.sale_stock.invoice_ids:
self.assertEquals(
invoice.amount_untaxed,
7315.0,
u"Error to apply discount on invoice" u" created from sale order.",
)
for line in invoice.invoice_line_ids:
self.assertTrue(
line.company_id,
"Error to inform field company_id on Sale Order Line.",
)
self.assertTrue(
line.partner_id,
"Error to inform field partner_id on Sale Order Line.",
)
|
Move test from l10n_br_sale_product to l10n_br_sale.
|
[12.0][MIG][WIP] Move test from l10n_br_sale_product to l10n_br_sale.
|
Python
|
agpl-3.0
|
OCA/l10n-brazil,OCA/l10n-brazil,OCA/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil,akretion/l10n-brazil
|
[12.0][MIG][WIP] Move test from l10n_br_sale_product to l10n_br_sale.
|
# @ 2019 Akretion - www.akretion.com.br -
# Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestL10nBRSaleProduct(common.TransactionCase):
def setUp(self):
super(TestL10nBRSaleProduct, self).setUp()
self.sale_object = self.env["sale.order"]
self.sale_stock = self.sale_object.browse(
self.ref("l10n_br_sale_product.l10n_br_sale_product_demo_1")
)
def test_l10n_br_sale_product(self):
"""Test fields implemented by l10n_br_sale_product"""
self.sale_stock.onchange_partner_id()
self.sale_stock.onchange_partner_shipping_id()
for line in self.sale_stock.order_line:
line.product_id_change()
line.onchange_fiscal()
self.assertTrue(
line.fiscal_position_id,
"Error to mapping Fiscal Position on Sale Order Line.",
)
self.assertEquals(
self.sale_stock.amount_total,
7473.3,
u"Error to apply discount on sale order.",
)
self.assertEquals(
self.sale_stock.amount_freight,
6.0,
u"Error to calculate Total Amount Freight.",
)
self.assertEquals(
self.sale_stock.amount_costs, 2.0, u"Error to calculate Total Amount Costs."
)
self.assertEquals(
self.sale_stock.amount_extra, 12.0, u"Error to calculate Total Amount Extra"
)
self.assertEquals(
self.sale_stock.amount_insurance,
4.0,
u"Error to calculate Total Amount Extra",
)
self.sale_stock.action_confirm()
# Create and check invoice
self.sale_stock.action_invoice_create(final=True)
for invoice in self.sale_stock.invoice_ids:
self.assertEquals(
invoice.amount_untaxed,
7315.0,
u"Error to apply discount on invoice" u" created from sale order.",
)
for line in invoice.invoice_line_ids:
self.assertTrue(
line.company_id,
"Error to inform field company_id on Sale Order Line.",
)
self.assertTrue(
line.partner_id,
"Error to inform field partner_id on Sale Order Line.",
)
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move test from l10n_br_sale_product to l10n_br_sale.<commit_after>
|
# @ 2019 Akretion - www.akretion.com.br -
# Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestL10nBRSaleProduct(common.TransactionCase):
def setUp(self):
super(TestL10nBRSaleProduct, self).setUp()
self.sale_object = self.env["sale.order"]
self.sale_stock = self.sale_object.browse(
self.ref("l10n_br_sale_product.l10n_br_sale_product_demo_1")
)
def test_l10n_br_sale_product(self):
"""Test fields implemented by l10n_br_sale_product"""
self.sale_stock.onchange_partner_id()
self.sale_stock.onchange_partner_shipping_id()
for line in self.sale_stock.order_line:
line.product_id_change()
line.onchange_fiscal()
self.assertTrue(
line.fiscal_position_id,
"Error to mapping Fiscal Position on Sale Order Line.",
)
self.assertEquals(
self.sale_stock.amount_total,
7473.3,
u"Error to apply discount on sale order.",
)
self.assertEquals(
self.sale_stock.amount_freight,
6.0,
u"Error to calculate Total Amount Freight.",
)
self.assertEquals(
self.sale_stock.amount_costs, 2.0, u"Error to calculate Total Amount Costs."
)
self.assertEquals(
self.sale_stock.amount_extra, 12.0, u"Error to calculate Total Amount Extra"
)
self.assertEquals(
self.sale_stock.amount_insurance,
4.0,
u"Error to calculate Total Amount Extra",
)
self.sale_stock.action_confirm()
# Create and check invoice
self.sale_stock.action_invoice_create(final=True)
for invoice in self.sale_stock.invoice_ids:
self.assertEquals(
invoice.amount_untaxed,
7315.0,
u"Error to apply discount on invoice" u" created from sale order.",
)
for line in invoice.invoice_line_ids:
self.assertTrue(
line.company_id,
"Error to inform field company_id on Sale Order Line.",
)
self.assertTrue(
line.partner_id,
"Error to inform field partner_id on Sale Order Line.",
)
|
[12.0][MIG][WIP] Move test from l10n_br_sale_product to l10n_br_sale.# @ 2019 Akretion - www.akretion.com.br -
# Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestL10nBRSaleProduct(common.TransactionCase):
def setUp(self):
super(TestL10nBRSaleProduct, self).setUp()
self.sale_object = self.env["sale.order"]
self.sale_stock = self.sale_object.browse(
self.ref("l10n_br_sale_product.l10n_br_sale_product_demo_1")
)
def test_l10n_br_sale_product(self):
"""Test fields implemented by l10n_br_sale_product"""
self.sale_stock.onchange_partner_id()
self.sale_stock.onchange_partner_shipping_id()
for line in self.sale_stock.order_line:
line.product_id_change()
line.onchange_fiscal()
self.assertTrue(
line.fiscal_position_id,
"Error to mapping Fiscal Position on Sale Order Line.",
)
self.assertEquals(
self.sale_stock.amount_total,
7473.3,
u"Error to apply discount on sale order.",
)
self.assertEquals(
self.sale_stock.amount_freight,
6.0,
u"Error to calculate Total Amount Freight.",
)
self.assertEquals(
self.sale_stock.amount_costs, 2.0, u"Error to calculate Total Amount Costs."
)
self.assertEquals(
self.sale_stock.amount_extra, 12.0, u"Error to calculate Total Amount Extra"
)
self.assertEquals(
self.sale_stock.amount_insurance,
4.0,
u"Error to calculate Total Amount Extra",
)
self.sale_stock.action_confirm()
# Create and check invoice
self.sale_stock.action_invoice_create(final=True)
for invoice in self.sale_stock.invoice_ids:
self.assertEquals(
invoice.amount_untaxed,
7315.0,
u"Error to apply discount on invoice" u" created from sale order.",
)
for line in invoice.invoice_line_ids:
self.assertTrue(
line.company_id,
"Error to inform field company_id on Sale Order Line.",
)
self.assertTrue(
line.partner_id,
"Error to inform field partner_id on Sale Order Line.",
)
|
<commit_before><commit_msg>[12.0][MIG][WIP] Move test from l10n_br_sale_product to l10n_br_sale.<commit_after># @ 2019 Akretion - www.akretion.com.br -
# Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
import odoo.tests.common as common
class TestL10nBRSaleProduct(common.TransactionCase):
def setUp(self):
super(TestL10nBRSaleProduct, self).setUp()
self.sale_object = self.env["sale.order"]
self.sale_stock = self.sale_object.browse(
self.ref("l10n_br_sale_product.l10n_br_sale_product_demo_1")
)
def test_l10n_br_sale_product(self):
"""Test fields implemented by l10n_br_sale_product"""
self.sale_stock.onchange_partner_id()
self.sale_stock.onchange_partner_shipping_id()
for line in self.sale_stock.order_line:
line.product_id_change()
line.onchange_fiscal()
self.assertTrue(
line.fiscal_position_id,
"Error to mapping Fiscal Position on Sale Order Line.",
)
self.assertEquals(
self.sale_stock.amount_total,
7473.3,
u"Error to apply discount on sale order.",
)
self.assertEquals(
self.sale_stock.amount_freight,
6.0,
u"Error to calculate Total Amount Freight.",
)
self.assertEquals(
self.sale_stock.amount_costs, 2.0, u"Error to calculate Total Amount Costs."
)
self.assertEquals(
self.sale_stock.amount_extra, 12.0, u"Error to calculate Total Amount Extra"
)
self.assertEquals(
self.sale_stock.amount_insurance,
4.0,
u"Error to calculate Total Amount Extra",
)
self.sale_stock.action_confirm()
# Create and check invoice
self.sale_stock.action_invoice_create(final=True)
for invoice in self.sale_stock.invoice_ids:
self.assertEquals(
invoice.amount_untaxed,
7315.0,
u"Error to apply discount on invoice" u" created from sale order.",
)
for line in invoice.invoice_line_ids:
self.assertTrue(
line.company_id,
"Error to inform field company_id on Sale Order Line.",
)
self.assertTrue(
line.partner_id,
"Error to inform field partner_id on Sale Order Line.",
)
|
|
4000af5d4ca53683b4577d7394c923c22f79ca52
|
simulate_loads.py
|
simulate_loads.py
|
import random
import itertools as it
from sklearn.externals import joblib
def simulate_loads(nfrag, ngt, q):
loads = [1] * nfrag
active = set(range(nfrag))
for k in range(1, len(loads)):
i0, i1 = random.sample(active, k=2)
active.remove(i0)
active.remove(i1)
active.add(len(loads))
if random.random() > q: # correct merge
new_load = max(loads[i0], loads[i1])
else: # false merge
new_load = min(loads[i0] + loads[i1], ngt)
loads.append(new_load)
return loads
def many_sims(n_jobs=2):
qs = [.025, .05, .1, .2]
nfrags = [10000, 20000, 40000, 80000, 160000]
nreps = 5
keys = [(n, q) for n, q, i in it.product(nfrags, qs, range(nreps))]
results = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(simulate_loads)(n, 1000, q) for n, q in keys
)
return dict(zip(keys, results))
|
Add function to simulate load of CSR merge matrix
|
Add function to simulate load of CSR merge matrix
|
Python
|
bsd-3-clause
|
jni/gala-scripts
|
Add function to simulate load of CSR merge matrix
|
import random
import itertools as it
from sklearn.externals import joblib
def simulate_loads(nfrag, ngt, q):
loads = [1] * nfrag
active = set(range(nfrag))
for k in range(1, len(loads)):
i0, i1 = random.sample(active, k=2)
active.remove(i0)
active.remove(i1)
active.add(len(loads))
if random.random() > q: # correct merge
new_load = max(loads[i0], loads[i1])
else: # false merge
new_load = min(loads[i0] + loads[i1], ngt)
loads.append(new_load)
return loads
def many_sims(n_jobs=2):
qs = [.025, .05, .1, .2]
nfrags = [10000, 20000, 40000, 80000, 160000]
nreps = 5
keys = [(n, q) for n, q, i in it.product(nfrags, qs, range(nreps))]
results = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(simulate_loads)(n, 1000, q) for n, q in keys
)
return dict(zip(keys, results))
|
<commit_before><commit_msg>Add function to simulate load of CSR merge matrix<commit_after>
|
import random
import itertools as it
from sklearn.externals import joblib
def simulate_loads(nfrag, ngt, q):
loads = [1] * nfrag
active = set(range(nfrag))
for k in range(1, len(loads)):
i0, i1 = random.sample(active, k=2)
active.remove(i0)
active.remove(i1)
active.add(len(loads))
if random.random() > q: # correct merge
new_load = max(loads[i0], loads[i1])
else: # false merge
new_load = min(loads[i0] + loads[i1], ngt)
loads.append(new_load)
return loads
def many_sims(n_jobs=2):
qs = [.025, .05, .1, .2]
nfrags = [10000, 20000, 40000, 80000, 160000]
nreps = 5
keys = [(n, q) for n, q, i in it.product(nfrags, qs, range(nreps))]
results = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(simulate_loads)(n, 1000, q) for n, q in keys
)
return dict(zip(keys, results))
|
Add function to simulate load of CSR merge matrix
import random
import itertools as it
from sklearn.externals import joblib
def simulate_loads(nfrag, ngt, q):
loads = [1] * nfrag
active = set(range(nfrag))
for k in range(1, len(loads)):
i0, i1 = random.sample(active, k=2)
active.remove(i0)
active.remove(i1)
active.add(len(loads))
if random.random() > q: # correct merge
new_load = max(loads[i0], loads[i1])
else: # false merge
new_load = min(loads[i0] + loads[i1], ngt)
loads.append(new_load)
return loads
def many_sims(n_jobs=2):
qs = [.025, .05, .1, .2]
nfrags = [10000, 20000, 40000, 80000, 160000]
nreps = 5
keys = [(n, q) for n, q, i in it.product(nfrags, qs, range(nreps))]
results = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(simulate_loads)(n, 1000, q) for n, q in keys
)
return dict(zip(keys, results))
|
<commit_before><commit_msg>Add function to simulate load of CSR merge matrix<commit_after>import random
import itertools as it
from sklearn.externals import joblib
def simulate_loads(nfrag, ngt, q):
loads = [1] * nfrag
active = set(range(nfrag))
for k in range(1, len(loads)):
i0, i1 = random.sample(active, k=2)
active.remove(i0)
active.remove(i1)
active.add(len(loads))
if random.random() > q: # correct merge
new_load = max(loads[i0], loads[i1])
else: # false merge
new_load = min(loads[i0] + loads[i1], ngt)
loads.append(new_load)
return loads
def many_sims(n_jobs=2):
qs = [.025, .05, .1, .2]
nfrags = [10000, 20000, 40000, 80000, 160000]
nreps = 5
keys = [(n, q) for n, q, i in it.product(nfrags, qs, range(nreps))]
results = joblib.Parallel(n_jobs=n_jobs)(
joblib.delayed(simulate_loads)(n, 1000, q) for n, q in keys
)
return dict(zip(keys, results))
|
|
4a4cb336839d42cee872e52399e17249b948492a
|
rackattack/common/globallock.py
|
rackattack/common/globallock.py
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
Print the stack info more clearly, when holding the global lock for too long
|
Print the stack info more clearly, when holding the global lock for too long
|
Python
|
apache-2.0
|
Stratoscale/rackattack-virtual,eliran-stratoscale/rackattack-virtual,Stratoscale/rackattack-virtual,eliran-stratoscale/rackattack-virtual
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
Print the stack info more clearly, when holding the global lock for too long
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
<commit_before>import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
<commit_msg>Print the stack info more clearly, when holding the global lock for too long<commit_after>
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
Print the stack info more clearly, when holding the global lock for too long
import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
<commit_before>import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=traceback.format_stack()))
def assertLocked():
assert not _lock.acquire(False)
return True
<commit_msg>Print the stack info more clearly, when holding the global lock for too long<commit_after>import threading
import contextlib
import time
import traceback
import logging
_lock = threading.Lock()
def prettyStack():
return "\n".join([line.strip() for line in traceback.format_stack()])
@contextlib.contextmanager
def lock():
before = time.time()
with _lock:
acquired = time.time()
took = acquired - before
if took > 0.1:
logging.error(
"Acquiring the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
yield
released = time.time()
took = released - acquired
if took > 0.3:
logging.error(
"Holding the global lock took more than 0.1s: %(took)ss. Stack:\n%(stack)s", dict(
took=took, stack=prettyStack()))
def assertLocked():
assert not _lock.acquire(False)
return True
|
217b5fd119a835ee0c3f2a359fe814727ef1c954
|
misc/system_list.py
|
misc/system_list.py
|
import lxml
import json
import urllib
from lxml import etree
tmpresponce = urllib.urlopen("https://api.eveonline.com/map/Sovereignty.xml.aspx")
resString = tmpresponce.read()
#print type(resString)
parser = etree.XMLParser(remove_blank_text=True)
responce = etree.XML(resString, parser)
print "starting"
systemIDMap = {}
# Get the array of rows, the rowset element
rows = responce.xpath("//rowset")
#iterate through the rows
for row in rows[0]:
systemIDMap[row.get("solarSystemID")] = row.get("solarSystemName")
print systemIDMap["30002768"]
#print etree.tostring(responce, pretty_print=True)
|
Load the Sovereignty xml and process
|
Load the Sovereignty xml and process
Compose a list of SystemID to SystemName mappings
|
Python
|
apache-2.0
|
Funi1234/InternetSpaceships,Funi1234/InternetSpaceships,Funi1234/InternetSpaceships
|
Load the Sovereignty xml and process
Compose a list of SystemID to SystemName mappings
|
import lxml
import json
import urllib
from lxml import etree
tmpresponce = urllib.urlopen("https://api.eveonline.com/map/Sovereignty.xml.aspx")
resString = tmpresponce.read()
#print type(resString)
parser = etree.XMLParser(remove_blank_text=True)
responce = etree.XML(resString, parser)
print "starting"
systemIDMap = {}
# Get the array of rows, the rowset element
rows = responce.xpath("//rowset")
#iterate through the rows
for row in rows[0]:
systemIDMap[row.get("solarSystemID")] = row.get("solarSystemName")
print systemIDMap["30002768"]
#print etree.tostring(responce, pretty_print=True)
|
<commit_before><commit_msg>Load the Sovereignty xml and process
Compose a list of SystemID to SystemName mappings<commit_after>
|
import lxml
import json
import urllib
from lxml import etree
tmpresponce = urllib.urlopen("https://api.eveonline.com/map/Sovereignty.xml.aspx")
resString = tmpresponce.read()
#print type(resString)
parser = etree.XMLParser(remove_blank_text=True)
responce = etree.XML(resString, parser)
print "starting"
systemIDMap = {}
# Get the array of rows, the rowset element
rows = responce.xpath("//rowset")
#iterate through the rows
for row in rows[0]:
systemIDMap[row.get("solarSystemID")] = row.get("solarSystemName")
print systemIDMap["30002768"]
#print etree.tostring(responce, pretty_print=True)
|
Load the Sovereignty xml and process
Compose a list of SystemID to SystemName mappings
import lxml
import json
import urllib
from lxml import etree
tmpresponce = urllib.urlopen("https://api.eveonline.com/map/Sovereignty.xml.aspx")
resString = tmpresponce.read()
#print type(resString)
parser = etree.XMLParser(remove_blank_text=True)
responce = etree.XML(resString, parser)
print "starting"
systemIDMap = {}
# Get the array of rows, the rowset element
rows = responce.xpath("//rowset")
#iterate through the rows
for row in rows[0]:
systemIDMap[row.get("solarSystemID")] = row.get("solarSystemName")
print systemIDMap["30002768"]
#print etree.tostring(responce, pretty_print=True)
|
<commit_before><commit_msg>Load the Sovereignty xml and process
Compose a list of SystemID to SystemName mappings<commit_after>import lxml
import json
import urllib
from lxml import etree
tmpresponce = urllib.urlopen("https://api.eveonline.com/map/Sovereignty.xml.aspx")
resString = tmpresponce.read()
#print type(resString)
parser = etree.XMLParser(remove_blank_text=True)
responce = etree.XML(resString, parser)
print "starting"
systemIDMap = {}
# Get the array of rows, the rowset element
rows = responce.xpath("//rowset")
#iterate through the rows
for row in rows[0]:
systemIDMap[row.get("solarSystemID")] = row.get("solarSystemName")
print systemIDMap["30002768"]
#print etree.tostring(responce, pretty_print=True)
|
|
0493098758f1c500a76852e9b032d2c867829ba8
|
src/ggrc/migrations/versions/20150326190126_16883afbc18b_update_status_column_to_draft.py
|
src/ggrc/migrations/versions/20150326190126_16883afbc18b_update_status_column_to_draft.py
|
"""Update status column to draft
Revision ID: 16883afbc18b
Revises: 56bda17c92ee
Create Date: 2015-03-26 19:01:26.702662
"""
# revision identifiers, used by Alembic.
revision = '16883afbc18b'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from ggrc.models.track_object_state import ObjectStates, ObjectStateTables
def upgrade():
for table_name in ObjectStateTables.table_names:
# Set the status value to Draft in all existing records where the value is Null
object_table = table(table_name,
column('status', sa.String(length=250)))
op.execute(
object_table.update().values(status = ObjectStates.DRAFT)\
.where(object_table.c.status == None)
)
def downgrade():
pass
|
Update object status to Draft when null, CORE-1519
|
Update object status to Draft when null, CORE-1519
|
Python
|
apache-2.0
|
AleksNeStu/ggrc-core,VinnieJohns/ggrc-core,VinnieJohns/ggrc-core,josthkko/ggrc-core,NejcZupec/ggrc-core,hyperNURb/ggrc-core,kr41/ggrc-core,vladan-m/ggrc-core,josthkko/ggrc-core,uskudnik/ggrc-core,vladan-m/ggrc-core,hasanalom/ggrc-core,jmakov/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,j0gurt/ggrc-core,hyperNURb/ggrc-core,hyperNURb/ggrc-core,hasanalom/ggrc-core,hasanalom/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,kr41/ggrc-core,NejcZupec/ggrc-core,edofic/ggrc-core,vladan-m/ggrc-core,AleksNeStu/ggrc-core,hyperNURb/ggrc-core,jmakov/ggrc-core,prasannav7/ggrc-core,prasannav7/ggrc-core,plamut/ggrc-core,selahssea/ggrc-core,jmakov/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,josthkko/ggrc-core,plamut/ggrc-core,plamut/ggrc-core,prasannav7/ggrc-core,AleksNeStu/ggrc-core,vladan-m/ggrc-core,prasannav7/ggrc-core,jmakov/ggrc-core,kr41/ggrc-core,josthkko/ggrc-core,andrei-karalionak/ggrc-core,jmakov/ggrc-core,j0gurt/ggrc-core,edofic/ggrc-core,uskudnik/ggrc-core,AleksNeStu/ggrc-core,NejcZupec/ggrc-core,plamut/ggrc-core,hasanalom/ggrc-core,j0gurt/ggrc-core,vladan-m/ggrc-core,andrei-karalionak/ggrc-core,VinnieJohns/ggrc-core,andrei-karalionak/ggrc-core,j0gurt/ggrc-core,uskudnik/ggrc-core,selahssea/ggrc-core,edofic/ggrc-core,hasanalom/ggrc-core,hyperNURb/ggrc-core
|
Update object status to Draft when null, CORE-1519
|
"""Update status column to draft
Revision ID: 16883afbc18b
Revises: 56bda17c92ee
Create Date: 2015-03-26 19:01:26.702662
"""
# revision identifiers, used by Alembic.
revision = '16883afbc18b'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from ggrc.models.track_object_state import ObjectStates, ObjectStateTables
def upgrade():
for table_name in ObjectStateTables.table_names:
# Set the status value to Draft in all existing records where the value is Null
object_table = table(table_name,
column('status', sa.String(length=250)))
op.execute(
object_table.update().values(status = ObjectStates.DRAFT)\
.where(object_table.c.status == None)
)
def downgrade():
pass
|
<commit_before><commit_msg>Update object status to Draft when null, CORE-1519<commit_after>
|
"""Update status column to draft
Revision ID: 16883afbc18b
Revises: 56bda17c92ee
Create Date: 2015-03-26 19:01:26.702662
"""
# revision identifiers, used by Alembic.
revision = '16883afbc18b'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from ggrc.models.track_object_state import ObjectStates, ObjectStateTables
def upgrade():
for table_name in ObjectStateTables.table_names:
# Set the status value to Draft in all existing records where the value is Null
object_table = table(table_name,
column('status', sa.String(length=250)))
op.execute(
object_table.update().values(status = ObjectStates.DRAFT)\
.where(object_table.c.status == None)
)
def downgrade():
pass
|
Update object status to Draft when null, CORE-1519
"""Update status column to draft
Revision ID: 16883afbc18b
Revises: 56bda17c92ee
Create Date: 2015-03-26 19:01:26.702662
"""
# revision identifiers, used by Alembic.
revision = '16883afbc18b'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from ggrc.models.track_object_state import ObjectStates, ObjectStateTables
def upgrade():
for table_name in ObjectStateTables.table_names:
# Set the status value to Draft in all existing records where the value is Null
object_table = table(table_name,
column('status', sa.String(length=250)))
op.execute(
object_table.update().values(status = ObjectStates.DRAFT)\
.where(object_table.c.status == None)
)
def downgrade():
pass
|
<commit_before><commit_msg>Update object status to Draft when null, CORE-1519<commit_after>
"""Update status column to draft
Revision ID: 16883afbc18b
Revises: 56bda17c92ee
Create Date: 2015-03-26 19:01:26.702662
"""
# revision identifiers, used by Alembic.
revision = '16883afbc18b'
down_revision = '56bda17c92ee'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from ggrc.models.track_object_state import ObjectStates, ObjectStateTables
def upgrade():
for table_name in ObjectStateTables.table_names:
# Set the status value to Draft in all existing records where the value is Null
object_table = table(table_name,
column('status', sa.String(length=250)))
op.execute(
object_table.update().values(status = ObjectStates.DRAFT)\
.where(object_table.c.status == None)
)
def downgrade():
pass
|
|
a821717103f8b469b71cee4f60fce50dd792cb36
|
InvenTree/stock/migrations/0071_auto_20211205_1733.py
|
InvenTree/stock/migrations/0071_auto_20211205_1733.py
|
# Generated by Django 3.2.5 on 2021-12-05 06:33
from django.db import migrations
import logging
logger = logging.getLogger('inventree')
def delete_scheduled(apps, schema_editor):
"""
Delete all stock items which are marked as 'scheduled_for_deletion'.
The issue that this field was addressing has now been fixed,
and so we can all move on with our lives...
"""
StockItem = apps.get_model('stock', 'stockitem')
items = StockItem.objects.filter(scheduled_for_deletion=True)
logger.info(f"Removing {items.count()} stock items scheduled for deletion")
items.delete()
Task = apps.get_model('django_q', 'schedule')
Task.objects.filter(func='stock.tasks.delete_old_stock_items').delete()
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0070_auto_20211128_0151'),
]
operations = [
migrations.RunPython(
delete_scheduled,
reverse_code=reverse,
)
]
|
Add a data migration which deletes any stock items which have been scheduled for deletion.
|
Add a data migration which deletes any stock items which have been scheduled for deletion.
Also deletes any instance of the "delete_old_stock_items" worker task
|
Python
|
mit
|
SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,SchrodingersGat/InvenTree,inventree/InvenTree,inventree/InvenTree
|
Add a data migration which deletes any stock items which have been scheduled for deletion.
Also deletes any instance of the "delete_old_stock_items" worker task
|
# Generated by Django 3.2.5 on 2021-12-05 06:33
from django.db import migrations
import logging
logger = logging.getLogger('inventree')
def delete_scheduled(apps, schema_editor):
"""
Delete all stock items which are marked as 'scheduled_for_deletion'.
The issue that this field was addressing has now been fixed,
and so we can all move on with our lives...
"""
StockItem = apps.get_model('stock', 'stockitem')
items = StockItem.objects.filter(scheduled_for_deletion=True)
logger.info(f"Removing {items.count()} stock items scheduled for deletion")
items.delete()
Task = apps.get_model('django_q', 'schedule')
Task.objects.filter(func='stock.tasks.delete_old_stock_items').delete()
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0070_auto_20211128_0151'),
]
operations = [
migrations.RunPython(
delete_scheduled,
reverse_code=reverse,
)
]
|
<commit_before><commit_msg>Add a data migration which deletes any stock items which have been scheduled for deletion.
Also deletes any instance of the "delete_old_stock_items" worker task<commit_after>
|
# Generated by Django 3.2.5 on 2021-12-05 06:33
from django.db import migrations
import logging
logger = logging.getLogger('inventree')
def delete_scheduled(apps, schema_editor):
"""
Delete all stock items which are marked as 'scheduled_for_deletion'.
The issue that this field was addressing has now been fixed,
and so we can all move on with our lives...
"""
StockItem = apps.get_model('stock', 'stockitem')
items = StockItem.objects.filter(scheduled_for_deletion=True)
logger.info(f"Removing {items.count()} stock items scheduled for deletion")
items.delete()
Task = apps.get_model('django_q', 'schedule')
Task.objects.filter(func='stock.tasks.delete_old_stock_items').delete()
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0070_auto_20211128_0151'),
]
operations = [
migrations.RunPython(
delete_scheduled,
reverse_code=reverse,
)
]
|
Add a data migration which deletes any stock items which have been scheduled for deletion.
Also deletes any instance of the "delete_old_stock_items" worker task
# Generated by Django 3.2.5 on 2021-12-05 06:33
from django.db import migrations
import logging
logger = logging.getLogger('inventree')
def delete_scheduled(apps, schema_editor):
"""
Delete all stock items which are marked as 'scheduled_for_deletion'.
The issue that this field was addressing has now been fixed,
and so we can all move on with our lives...
"""
StockItem = apps.get_model('stock', 'stockitem')
items = StockItem.objects.filter(scheduled_for_deletion=True)
logger.info(f"Removing {items.count()} stock items scheduled for deletion")
items.delete()
Task = apps.get_model('django_q', 'schedule')
Task.objects.filter(func='stock.tasks.delete_old_stock_items').delete()
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0070_auto_20211128_0151'),
]
operations = [
migrations.RunPython(
delete_scheduled,
reverse_code=reverse,
)
]
|
<commit_before><commit_msg>Add a data migration which deletes any stock items which have been scheduled for deletion.
Also deletes any instance of the "delete_old_stock_items" worker task<commit_after># Generated by Django 3.2.5 on 2021-12-05 06:33
from django.db import migrations
import logging
logger = logging.getLogger('inventree')
def delete_scheduled(apps, schema_editor):
"""
Delete all stock items which are marked as 'scheduled_for_deletion'.
The issue that this field was addressing has now been fixed,
and so we can all move on with our lives...
"""
StockItem = apps.get_model('stock', 'stockitem')
items = StockItem.objects.filter(scheduled_for_deletion=True)
logger.info(f"Removing {items.count()} stock items scheduled for deletion")
items.delete()
Task = apps.get_model('django_q', 'schedule')
Task.objects.filter(func='stock.tasks.delete_old_stock_items').delete()
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('stock', '0070_auto_20211128_0151'),
]
operations = [
migrations.RunPython(
delete_scheduled,
reverse_code=reverse,
)
]
|
|
2de2a7fc80df710e308846ffde8aa4546e832394
|
chrome/PRESUBMIT.py
|
chrome/PRESUBMIT.py
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98
|
Python
|
bsd-3-clause
|
yitian134/chromium,adobe/chromium,ropik/chromium,adobe/chromium,adobe/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,yitian134/chromium,gavinp/chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,adobe/chromium,yitian134/chromium,gavinp/chromium,yitian134/chromium,adobe/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,adobe/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,yitian134/chromium,ropik/chromium,ropik/chromium,adobe/chromium,gavinp/chromium,gavinp/chromium,adobe/chromium,gavinp/chromium,ropik/chromium,ropik/chromium,Crystalnix/house-of-life-chromium,Crystalnix/house-of-life-chromium,gavinp/chromium,ropik/chromium,ropik/chromium,yitian134/chromium,yitian134/chromium,adobe/chromium,gavinp/chromium
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98<commit_after>
|
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
<commit_before><commit_msg>Call the new presubmit checks from chrome/ code, with a blacklist.
Review URL: http://codereview.chromium.org/400014
git-svn-id: de016e52bd170d2d4f2344f9bf92d50478b649e0@32190 0039d316-1c4b-4281-b951-d872f2087c98<commit_after># Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Makes sure that the chrome/ code is cpplint clean."""
INCLUDE_CPP_FILES_ONLY = (
r'.*\.cc$', r'.*\.h$'
)
EXCLUDE = (
# Objective C confuses everything.
r'.*cocoa.*',
r'.*_mac\.(cc|h)$',
r'.*_mac_.*',
# All the messages files do weird multiple include trickery
r'.*_messages_internal\.h$',
r'render_messages.h$',
# Autogenerated window resources files are off limits
r'.*resource.h$',
# GTK macros in C-ish header code cause false positives
r'gtk_.*\.h$',
# Header trickery
r'.*-inl\.h$',
# Templates
r'sigslotrepeater\.h$',
# GCC attribute trickery
r'sel_main\.cc$',
# Mozilla code
r'mork_reader\.h$',
r'mork_reader\.cc$',
r'nss_decryptor_linux\.cc$',
# Has safe printf usage that cpplint complains about
r'safe_browsing_util\.cc$',
# Too much math on one line?
r'bloom_filter\.cc$',
# Bogus ifdef tricks
r'renderer_webkitclient_impl\.cc$',
r'temp_scaffolding_stubs\.h$',
# Lines > 100 chars
r'gcapi\.cc$',
)
def CheckChangeOnUpload(input_api, output_api):
results = []
black_list = input_api.DEFAULT_BLACK_LIST + EXCLUDE
sources = lambda x: input_api.FilterSourceFile(
x, white_list=INCLUDE_CPP_FILES_ONLY, black_list=black_list)
results.extend(input_api.canned_checks.CheckChangeLintsClean(
input_api, output_api, sources))
return results
|
|
ed6fa694ea7ce62cdbda89a60cbbaad1409c285b
|
misc/add_source_header.py
|
misc/add_source_header.py
|
#!/usr/bin/env python
import sys
import itertools
import os
import re
import traceback
from optparse import OptionParser
header_path = "source_header.txt"
def debug(s):
sys.stderr.write("[DEBUG] %s\n" % s)
def error(s):
sys.stderr.write("[ERROR] %s\n" % s)
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
return
def add_or_replace(path):
# query if this path should be handled
print "Checking %s?" % path
f = open(path, "r")
cur_header = ""
has_empty_line = False
while True:
line = f.readline()
if not line:
break
if line.startswith("//"):
cur_header += line
continue
has_empty_line = line.strip() == ""
rem = line + f.read()
f.close()
break
header_file = open(header_path, "r")
new_header = header_file.read()
if cur_header == new_header:
print "Header not changed; nothing done"
return
else:
sys.stdout.write("Current header: " + cur_header)
sys.stdout.write("Modifying %s? (y/n): " % path)
response = sys.stdin.readline()
response = response.lower().strip()
if not (response == "y" or response == "yes"):
sys.stderr.write("Ignoring %s\n" % path)
return
f = open(path, "w")
f.write(new_header)
if not has_empty_line:
f.write("\n")
f.write(rem)
f.close()
return
def apply_recursively(top):
for dirName, subdirList, fileList in os.walk(top):
print dirName
if ".git" in dirName or "examples" in dirName \
or "gmock" in dirName:
print "Ignoring git directory"
continue
for f in fileList:
if not f.endswith("cc") or f.endswith(".h"):
continue
add_or_replace(os.path.join(dirName, f))
return
def main():
top_path = sys.argv[1]
apply_recursively(top_path)
if __name__ == "__main__":
main()
|
Add a utility script to add source header
|
Add a utility script to add source header
|
Python
|
bsd-3-clause
|
naoyam/physis,naoyam/physis,naoyam/physis,naoyam/physis
|
Add a utility script to add source header
|
#!/usr/bin/env python
import sys
import itertools
import os
import re
import traceback
from optparse import OptionParser
header_path = "source_header.txt"
def debug(s):
sys.stderr.write("[DEBUG] %s\n" % s)
def error(s):
sys.stderr.write("[ERROR] %s\n" % s)
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
return
def add_or_replace(path):
# query if this path should be handled
print "Checking %s?" % path
f = open(path, "r")
cur_header = ""
has_empty_line = False
while True:
line = f.readline()
if not line:
break
if line.startswith("//"):
cur_header += line
continue
has_empty_line = line.strip() == ""
rem = line + f.read()
f.close()
break
header_file = open(header_path, "r")
new_header = header_file.read()
if cur_header == new_header:
print "Header not changed; nothing done"
return
else:
sys.stdout.write("Current header: " + cur_header)
sys.stdout.write("Modifying %s? (y/n): " % path)
response = sys.stdin.readline()
response = response.lower().strip()
if not (response == "y" or response == "yes"):
sys.stderr.write("Ignoring %s\n" % path)
return
f = open(path, "w")
f.write(new_header)
if not has_empty_line:
f.write("\n")
f.write(rem)
f.close()
return
def apply_recursively(top):
for dirName, subdirList, fileList in os.walk(top):
print dirName
if ".git" in dirName or "examples" in dirName \
or "gmock" in dirName:
print "Ignoring git directory"
continue
for f in fileList:
if not f.endswith("cc") or f.endswith(".h"):
continue
add_or_replace(os.path.join(dirName, f))
return
def main():
top_path = sys.argv[1]
apply_recursively(top_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a utility script to add source header<commit_after>
|
#!/usr/bin/env python
import sys
import itertools
import os
import re
import traceback
from optparse import OptionParser
header_path = "source_header.txt"
def debug(s):
sys.stderr.write("[DEBUG] %s\n" % s)
def error(s):
sys.stderr.write("[ERROR] %s\n" % s)
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
return
def add_or_replace(path):
# query if this path should be handled
print "Checking %s?" % path
f = open(path, "r")
cur_header = ""
has_empty_line = False
while True:
line = f.readline()
if not line:
break
if line.startswith("//"):
cur_header += line
continue
has_empty_line = line.strip() == ""
rem = line + f.read()
f.close()
break
header_file = open(header_path, "r")
new_header = header_file.read()
if cur_header == new_header:
print "Header not changed; nothing done"
return
else:
sys.stdout.write("Current header: " + cur_header)
sys.stdout.write("Modifying %s? (y/n): " % path)
response = sys.stdin.readline()
response = response.lower().strip()
if not (response == "y" or response == "yes"):
sys.stderr.write("Ignoring %s\n" % path)
return
f = open(path, "w")
f.write(new_header)
if not has_empty_line:
f.write("\n")
f.write(rem)
f.close()
return
def apply_recursively(top):
for dirName, subdirList, fileList in os.walk(top):
print dirName
if ".git" in dirName or "examples" in dirName \
or "gmock" in dirName:
print "Ignoring git directory"
continue
for f in fileList:
if not f.endswith("cc") or f.endswith(".h"):
continue
add_or_replace(os.path.join(dirName, f))
return
def main():
top_path = sys.argv[1]
apply_recursively(top_path)
if __name__ == "__main__":
main()
|
Add a utility script to add source header
#!/usr/bin/env python
import sys
import itertools
import os
import re
import traceback
from optparse import OptionParser
header_path = "source_header.txt"
def debug(s):
sys.stderr.write("[DEBUG] %s\n" % s)
def error(s):
sys.stderr.write("[ERROR] %s\n" % s)
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
return
def add_or_replace(path):
# query if this path should be handled
print "Checking %s?" % path
f = open(path, "r")
cur_header = ""
has_empty_line = False
while True:
line = f.readline()
if not line:
break
if line.startswith("//"):
cur_header += line
continue
has_empty_line = line.strip() == ""
rem = line + f.read()
f.close()
break
header_file = open(header_path, "r")
new_header = header_file.read()
if cur_header == new_header:
print "Header not changed; nothing done"
return
else:
sys.stdout.write("Current header: " + cur_header)
sys.stdout.write("Modifying %s? (y/n): " % path)
response = sys.stdin.readline()
response = response.lower().strip()
if not (response == "y" or response == "yes"):
sys.stderr.write("Ignoring %s\n" % path)
return
f = open(path, "w")
f.write(new_header)
if not has_empty_line:
f.write("\n")
f.write(rem)
f.close()
return
def apply_recursively(top):
for dirName, subdirList, fileList in os.walk(top):
print dirName
if ".git" in dirName or "examples" in dirName \
or "gmock" in dirName:
print "Ignoring git directory"
continue
for f in fileList:
if not f.endswith("cc") or f.endswith(".h"):
continue
add_or_replace(os.path.join(dirName, f))
return
def main():
top_path = sys.argv[1]
apply_recursively(top_path)
if __name__ == "__main__":
main()
|
<commit_before><commit_msg>Add a utility script to add source header<commit_after>#!/usr/bin/env python
import sys
import itertools
import os
import re
import traceback
from optparse import OptionParser
header_path = "source_header.txt"
def debug(s):
sys.stderr.write("[DEBUG] %s\n" % s)
def error(s):
sys.stderr.write("[ERROR] %s\n" % s)
def main():
parser = OptionParser()
(options, args) = parser.parse_args()
return
def add_or_replace(path):
# query if this path should be handled
print "Checking %s?" % path
f = open(path, "r")
cur_header = ""
has_empty_line = False
while True:
line = f.readline()
if not line:
break
if line.startswith("//"):
cur_header += line
continue
has_empty_line = line.strip() == ""
rem = line + f.read()
f.close()
break
header_file = open(header_path, "r")
new_header = header_file.read()
if cur_header == new_header:
print "Header not changed; nothing done"
return
else:
sys.stdout.write("Current header: " + cur_header)
sys.stdout.write("Modifying %s? (y/n): " % path)
response = sys.stdin.readline()
response = response.lower().strip()
if not (response == "y" or response == "yes"):
sys.stderr.write("Ignoring %s\n" % path)
return
f = open(path, "w")
f.write(new_header)
if not has_empty_line:
f.write("\n")
f.write(rem)
f.close()
return
def apply_recursively(top):
for dirName, subdirList, fileList in os.walk(top):
print dirName
if ".git" in dirName or "examples" in dirName \
or "gmock" in dirName:
print "Ignoring git directory"
continue
for f in fileList:
if not f.endswith("cc") or f.endswith(".h"):
continue
add_or_replace(os.path.join(dirName, f))
return
def main():
top_path = sys.argv[1]
apply_recursively(top_path)
if __name__ == "__main__":
main()
|
|
57e108e2b1680dc5ec26142d43e90207ec1a8a37
|
app/common/falcon/error_handlers.py
|
app/common/falcon/error_handlers.py
|
from __future__ import unicode_literals, absolute_import, division
import re
import json
from uuid import uuid4
import falcon.responders
from falcon import HTTP_500, HTTP_404, HTTP_405
from app.common.errors import BaseAppError
from app.common.text_utils import to_first_lower
def _handle_app_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type=re.sub('Error$', '', to_first_lower(type(ex).__name__))
)))
def _handle_internal_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type='internal' # NOTE do not expose real type of unhandled error
)))
def _not_found_responder(req, res, **kwargs):
# TODO send error to sentry
res.status = HTTP_404
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='Requested resource is not found',
type='notFound'
)))
def _create_method_not_allowed_responder(allowed_methods):
allowed = ', '.join(allowed_methods)
def method_not_allowed(req, res, **kwargs):
res.status = HTTP_405
res.set_header('Allow', allowed)
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='The method is not allowed for the requested resource',
type='methodNotAllowed',
allowed_methods=allowed_methods
)))
return method_not_allowed
def _patch_responders():
# There are no way to override Falcon responders other than monkey patching
falcon.responders.path_not_found = _not_found_responder
falcon.responders.create_method_not_allowed \
= _create_method_not_allowed_responder
# TODO consider also patch default bad_request responder
def setup_error_handlers(app):
"""
@type app: falcon.API
"""
_patch_responders()
app.add_error_handler(BaseAppError, _handle_app_error)
app.add_error_handler(Exception, _handle_internal_error)
|
Add handlers for internal/user errors, not found, method not allowed
|
Add handlers for internal/user errors, not found, method not allowed
|
Python
|
mit
|
diyan/falcon_seed
|
Add handlers for internal/user errors, not found, method not allowed
|
from __future__ import unicode_literals, absolute_import, division
import re
import json
from uuid import uuid4
import falcon.responders
from falcon import HTTP_500, HTTP_404, HTTP_405
from app.common.errors import BaseAppError
from app.common.text_utils import to_first_lower
def _handle_app_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type=re.sub('Error$', '', to_first_lower(type(ex).__name__))
)))
def _handle_internal_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type='internal' # NOTE do not expose real type of unhandled error
)))
def _not_found_responder(req, res, **kwargs):
# TODO send error to sentry
res.status = HTTP_404
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='Requested resource is not found',
type='notFound'
)))
def _create_method_not_allowed_responder(allowed_methods):
allowed = ', '.join(allowed_methods)
def method_not_allowed(req, res, **kwargs):
res.status = HTTP_405
res.set_header('Allow', allowed)
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='The method is not allowed for the requested resource',
type='methodNotAllowed',
allowed_methods=allowed_methods
)))
return method_not_allowed
def _patch_responders():
# There are no way to override Falcon responders other than monkey patching
falcon.responders.path_not_found = _not_found_responder
falcon.responders.create_method_not_allowed \
= _create_method_not_allowed_responder
# TODO consider also patch default bad_request responder
def setup_error_handlers(app):
"""
@type app: falcon.API
"""
_patch_responders()
app.add_error_handler(BaseAppError, _handle_app_error)
app.add_error_handler(Exception, _handle_internal_error)
|
<commit_before><commit_msg>Add handlers for internal/user errors, not found, method not allowed<commit_after>
|
from __future__ import unicode_literals, absolute_import, division
import re
import json
from uuid import uuid4
import falcon.responders
from falcon import HTTP_500, HTTP_404, HTTP_405
from app.common.errors import BaseAppError
from app.common.text_utils import to_first_lower
def _handle_app_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type=re.sub('Error$', '', to_first_lower(type(ex).__name__))
)))
def _handle_internal_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type='internal' # NOTE do not expose real type of unhandled error
)))
def _not_found_responder(req, res, **kwargs):
# TODO send error to sentry
res.status = HTTP_404
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='Requested resource is not found',
type='notFound'
)))
def _create_method_not_allowed_responder(allowed_methods):
allowed = ', '.join(allowed_methods)
def method_not_allowed(req, res, **kwargs):
res.status = HTTP_405
res.set_header('Allow', allowed)
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='The method is not allowed for the requested resource',
type='methodNotAllowed',
allowed_methods=allowed_methods
)))
return method_not_allowed
def _patch_responders():
# There are no way to override Falcon responders other than monkey patching
falcon.responders.path_not_found = _not_found_responder
falcon.responders.create_method_not_allowed \
= _create_method_not_allowed_responder
# TODO consider also patch default bad_request responder
def setup_error_handlers(app):
"""
@type app: falcon.API
"""
_patch_responders()
app.add_error_handler(BaseAppError, _handle_app_error)
app.add_error_handler(Exception, _handle_internal_error)
|
Add handlers for internal/user errors, not found, method not allowed
from __future__ import unicode_literals, absolute_import, division
import re
import json
from uuid import uuid4
import falcon.responders
from falcon import HTTP_500, HTTP_404, HTTP_405
from app.common.errors import BaseAppError
from app.common.text_utils import to_first_lower
def _handle_app_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type=re.sub('Error$', '', to_first_lower(type(ex).__name__))
)))
def _handle_internal_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type='internal' # NOTE do not expose real type of unhandled error
)))
def _not_found_responder(req, res, **kwargs):
# TODO send error to sentry
res.status = HTTP_404
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='Requested resource is not found',
type='notFound'
)))
def _create_method_not_allowed_responder(allowed_methods):
allowed = ', '.join(allowed_methods)
def method_not_allowed(req, res, **kwargs):
res.status = HTTP_405
res.set_header('Allow', allowed)
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='The method is not allowed for the requested resource',
type='methodNotAllowed',
allowed_methods=allowed_methods
)))
return method_not_allowed
def _patch_responders():
# There are no way to override Falcon responders other than monkey patching
falcon.responders.path_not_found = _not_found_responder
falcon.responders.create_method_not_allowed \
= _create_method_not_allowed_responder
# TODO consider also patch default bad_request responder
def setup_error_handlers(app):
"""
@type app: falcon.API
"""
_patch_responders()
app.add_error_handler(BaseAppError, _handle_app_error)
app.add_error_handler(Exception, _handle_internal_error)
|
<commit_before><commit_msg>Add handlers for internal/user errors, not found, method not allowed<commit_after>from __future__ import unicode_literals, absolute_import, division
import re
import json
from uuid import uuid4
import falcon.responders
from falcon import HTTP_500, HTTP_404, HTTP_405
from app.common.errors import BaseAppError
from app.common.text_utils import to_first_lower
def _handle_app_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type=re.sub('Error$', '', to_first_lower(type(ex).__name__))
)))
def _handle_internal_error(ex, req, res, params):
# TODO send error to sentry
# TODO display error stack if X-Debug were specified
res.status = HTTP_500
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message=ex.message,
type='internal' # NOTE do not expose real type of unhandled error
)))
def _not_found_responder(req, res, **kwargs):
# TODO send error to sentry
res.status = HTTP_404
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='Requested resource is not found',
type='notFound'
)))
def _create_method_not_allowed_responder(allowed_methods):
allowed = ', '.join(allowed_methods)
def method_not_allowed(req, res, **kwargs):
res.status = HTTP_405
res.set_header('Allow', allowed)
res.body = json.dumps(dict(error=dict(
id=uuid4().hex,
message='The method is not allowed for the requested resource',
type='methodNotAllowed',
allowed_methods=allowed_methods
)))
return method_not_allowed
def _patch_responders():
# There are no way to override Falcon responders other than monkey patching
falcon.responders.path_not_found = _not_found_responder
falcon.responders.create_method_not_allowed \
= _create_method_not_allowed_responder
# TODO consider also patch default bad_request responder
def setup_error_handlers(app):
"""
@type app: falcon.API
"""
_patch_responders()
app.add_error_handler(BaseAppError, _handle_app_error)
app.add_error_handler(Exception, _handle_internal_error)
|
|
4718be19fd198ca0a2eaf4582dc283d52f40d42a
|
scripts/check-used-functions.py
|
scripts/check-used-functions.py
|
#!/usr/bin/env python
"""
Check that all the functions defined in the chemfiles-sys crate are
effectivelly used in the chemfiles binding.
"""
import os
ROOT = os.path.dirname(os.path.dirname(__file__))
def functions_list():
functions = []
with open(os.path.join(ROOT, "src", "chemfiles", "ffi.py")) as fd:
for line in fd:
line = line.strip()
if line.startswith("# Function"):
name = line.split('"')[1]
functions.append(name)
return functions
def read_all_source():
source = ""
for (dirpath, _, pathes) in os.walk(os.path.join(ROOT, "src")):
for path in pathes:
if path != "ffi.py":
with open(os.path.join(ROOT, dirpath, path)) as fd:
source += fd.read()
return source
def check_functions(functions, source):
for function in functions:
if function not in source:
print("Missing: " + function)
if __name__ == '__main__':
functions = functions_list()
source = read_all_source()
check_functions(functions, source)
|
Add a script to check if all function are wraped to Python
|
Add a script to check if all function are wraped to Python
|
Python
|
mpl-2.0
|
Luthaf/Chemharp-python
|
Add a script to check if all function are wraped to Python
|
#!/usr/bin/env python
"""
Check that all the functions defined in the chemfiles-sys crate are
effectivelly used in the chemfiles binding.
"""
import os
ROOT = os.path.dirname(os.path.dirname(__file__))
def functions_list():
functions = []
with open(os.path.join(ROOT, "src", "chemfiles", "ffi.py")) as fd:
for line in fd:
line = line.strip()
if line.startswith("# Function"):
name = line.split('"')[1]
functions.append(name)
return functions
def read_all_source():
source = ""
for (dirpath, _, pathes) in os.walk(os.path.join(ROOT, "src")):
for path in pathes:
if path != "ffi.py":
with open(os.path.join(ROOT, dirpath, path)) as fd:
source += fd.read()
return source
def check_functions(functions, source):
for function in functions:
if function not in source:
print("Missing: " + function)
if __name__ == '__main__':
functions = functions_list()
source = read_all_source()
check_functions(functions, source)
|
<commit_before><commit_msg>Add a script to check if all function are wraped to Python<commit_after>
|
#!/usr/bin/env python
"""
Check that all the functions defined in the chemfiles-sys crate are
effectivelly used in the chemfiles binding.
"""
import os
ROOT = os.path.dirname(os.path.dirname(__file__))
def functions_list():
functions = []
with open(os.path.join(ROOT, "src", "chemfiles", "ffi.py")) as fd:
for line in fd:
line = line.strip()
if line.startswith("# Function"):
name = line.split('"')[1]
functions.append(name)
return functions
def read_all_source():
source = ""
for (dirpath, _, pathes) in os.walk(os.path.join(ROOT, "src")):
for path in pathes:
if path != "ffi.py":
with open(os.path.join(ROOT, dirpath, path)) as fd:
source += fd.read()
return source
def check_functions(functions, source):
for function in functions:
if function not in source:
print("Missing: " + function)
if __name__ == '__main__':
functions = functions_list()
source = read_all_source()
check_functions(functions, source)
|
Add a script to check if all function are wraped to Python#!/usr/bin/env python
"""
Check that all the functions defined in the chemfiles-sys crate are
effectivelly used in the chemfiles binding.
"""
import os
ROOT = os.path.dirname(os.path.dirname(__file__))
def functions_list():
functions = []
with open(os.path.join(ROOT, "src", "chemfiles", "ffi.py")) as fd:
for line in fd:
line = line.strip()
if line.startswith("# Function"):
name = line.split('"')[1]
functions.append(name)
return functions
def read_all_source():
source = ""
for (dirpath, _, pathes) in os.walk(os.path.join(ROOT, "src")):
for path in pathes:
if path != "ffi.py":
with open(os.path.join(ROOT, dirpath, path)) as fd:
source += fd.read()
return source
def check_functions(functions, source):
for function in functions:
if function not in source:
print("Missing: " + function)
if __name__ == '__main__':
functions = functions_list()
source = read_all_source()
check_functions(functions, source)
|
<commit_before><commit_msg>Add a script to check if all function are wraped to Python<commit_after>#!/usr/bin/env python
"""
Check that all the functions defined in the chemfiles-sys crate are
effectivelly used in the chemfiles binding.
"""
import os
ROOT = os.path.dirname(os.path.dirname(__file__))
def functions_list():
functions = []
with open(os.path.join(ROOT, "src", "chemfiles", "ffi.py")) as fd:
for line in fd:
line = line.strip()
if line.startswith("# Function"):
name = line.split('"')[1]
functions.append(name)
return functions
def read_all_source():
source = ""
for (dirpath, _, pathes) in os.walk(os.path.join(ROOT, "src")):
for path in pathes:
if path != "ffi.py":
with open(os.path.join(ROOT, dirpath, path)) as fd:
source += fd.read()
return source
def check_functions(functions, source):
for function in functions:
if function not in source:
print("Missing: " + function)
if __name__ == '__main__':
functions = functions_list()
source = read_all_source()
check_functions(functions, source)
|
|
77e489734f0206ba454c7b855615451b7cf021e1
|
djangae/blobstore_service.py
|
djangae/blobstore_service.py
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server, demo_app
from google.appengine.tools.devappserver2.blob_upload import Application
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(demo_app))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server
from google.appengine.tools.devappserver2.blob_upload import Application
from djangae.views import internalupload
from django.core.handlers.wsgi import WSGIRequest
from django.utils.encoding import force_str
def handler(environ, start_response):
request = WSGIRequest(environ)
response = internalupload(request)
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
start_response(force_str(status), response_headers)
return response
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(handler))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
Make the blobstore service wsgi handler only serve our upload handler view
|
Make the blobstore service wsgi handler only serve our upload handler view
|
Python
|
bsd-3-clause
|
potatolondon/djangae,leekchan/djangae,armirusco/djangae,armirusco/djangae,chargrizzle/djangae,chargrizzle/djangae,asendecka/djangae,jscissr/djangae,grzes/djangae,jscissr/djangae,kirberich/djangae,trik/djangae,chargrizzle/djangae,wangjun/djangae,wangjun/djangae,asendecka/djangae,asendecka/djangae,kirberich/djangae,SiPiggles/djangae,trik/djangae,pablorecio/djangae,martinogden/djangae,leekchan/djangae,grzes/djangae,jscissr/djangae,martinogden/djangae,potatolondon/djangae,kirberich/djangae,grzes/djangae,SiPiggles/djangae,leekchan/djangae,trik/djangae,pablorecio/djangae,pablorecio/djangae,armirusco/djangae,wangjun/djangae,SiPiggles/djangae,martinogden/djangae
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server, demo_app
from google.appengine.tools.devappserver2.blob_upload import Application
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(demo_app))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
Make the blobstore service wsgi handler only serve our upload handler view
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server
from google.appengine.tools.devappserver2.blob_upload import Application
from djangae.views import internalupload
from django.core.handlers.wsgi import WSGIRequest
from django.utils.encoding import force_str
def handler(environ, start_response):
request = WSGIRequest(environ)
response = internalupload(request)
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
start_response(force_str(status), response_headers)
return response
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(handler))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
<commit_before>import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server, demo_app
from google.appengine.tools.devappserver2.blob_upload import Application
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(demo_app))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
<commit_msg>Make the blobstore service wsgi handler only serve our upload handler view<commit_after>
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server
from google.appengine.tools.devappserver2.blob_upload import Application
from djangae.views import internalupload
from django.core.handlers.wsgi import WSGIRequest
from django.utils.encoding import force_str
def handler(environ, start_response):
request = WSGIRequest(environ)
response = internalupload(request)
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
start_response(force_str(status), response_headers)
return response
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(handler))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server, demo_app
from google.appengine.tools.devappserver2.blob_upload import Application
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(demo_app))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
Make the blobstore service wsgi handler only serve our upload handler viewimport os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server
from google.appengine.tools.devappserver2.blob_upload import Application
from djangae.views import internalupload
from django.core.handlers.wsgi import WSGIRequest
from django.utils.encoding import force_str
def handler(environ, start_response):
request = WSGIRequest(environ)
response = internalupload(request)
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
start_response(force_str(status), response_headers)
return response
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(handler))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
<commit_before>import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server, demo_app
from google.appengine.tools.devappserver2.blob_upload import Application
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(demo_app))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
<commit_msg>Make the blobstore service wsgi handler only serve our upload handler view<commit_after>import os
import threading
import logging
blobstore_service = None
server = None
def start_blobstore_service():
"""
When the blobstore files API was deprecated, the blobstore storage was switched
to use a POST request to the upload handler when storing files uploaded via Django.
Unfortunately this breaks in the local sandbox when you aren't running the dev_appserver
because there is no server to handle the blobstore upload. So, this service is kicked
off by the local sandbox and only handles blobstore uploads. When runserver kicks in
this service is stopped.
"""
global blobstore_service
global server
from wsgiref.simple_server import make_server
from google.appengine.tools.devappserver2.blob_upload import Application
from djangae.views import internalupload
from django.core.handlers.wsgi import WSGIRequest
from django.utils.encoding import force_str
def handler(environ, start_response):
request = WSGIRequest(environ)
response = internalupload(request)
status = '%s %s' % (response.status_code, response.reason_phrase)
response_headers = [(str(k), str(v)) for k, v in response.items()]
start_response(force_str(status), response_headers)
return response
port = int(os.environ['SERVER_PORT'])
logging.info("Starting blobstore service on port %s", port)
server = make_server('', port, Application(handler))
blobstore_service = threading.Thread(target=server.serve_forever)
blobstore_service.daemon = True
blobstore_service.start()
def stop_blobstore_service():
global blobstore_service
global server
if not blobstore_service:
return
server.shutdown()
blobstore_service.join(5)
blobstore_service = None
|
61fe7343a7792d388edb074a0c8e4382e2d8adc1
|
color_transformations_skimage.py
|
color_transformations_skimage.py
|
import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).
|
Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).
|
Python
|
mit
|
maxalbert/colormap-selector
|
Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).
|
import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
<commit_before><commit_msg>Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).<commit_after>
|
import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
<commit_before><commit_msg>Use color transformation functions from scikit-image for now, even though it is a bit slower (there seems to be a bug somewhere in my manual implementation).<commit_after>import numpy as np
import matplotlib.colors as mcolors
from skimage.color import rgb2lab as rgb2lab_skimage
from skimage.color import lab2rgb as lab2rgb_skimage
class RGBRangeError(Exception):
pass
def rgb2lab(rgb):
rgb = np.asarray(rgb).reshape(1, 1, 3)
lab = rgb2lab_skimage(rgb).reshape(3)
return lab
def lab2rgb(lab, assert_valid=False, clip=False):
lab = np.asarray(lab).reshape(1, 1, 3)
rgb = lab2rgb_skimage(lab).reshape(3)
if assert_valid and ((rgb < 0.0).any() or (rgb > 1.0).any()):
raise RGBRangeError()
if clip:
rgb = np.clip(rgb, 0., 1.)
return rgb
def lab2rgba(lab, assert_valid=False, clip=False):
r, g, b = lab2rgb(lab, assert_valid=assert_valid, clip=clip)
return np.array([r, g, b, 1.])
def linear_colormap(pt1, pt2, coordspace='RGB'):
"""
Define a perceptually linear colormap defined through a line in the
CIELab [1] color space. The line is defined by its endpoints `pt1`,
`pt2`. The argument `coordspace` can be either `RGB` (the default)
or `lab` and specifies whether the coordinates of `pt1`, `pt2` are
given in RGB or Lab coordinates.
[1] http://dba.med.sc.edu/price/irf/Adobe_tg/models/cielab.html
"""
if coordspace == 'RGB':
pt1 = np.array(rgb2lab(pt1))
pt2 = np.array(rgb2lab(pt2))
elif coordspace == 'Lab':
pt1 = np.array(pt1)
pt2 = np.array(pt2)
else:
raise ValueError("Argument 'coordspace' must be either 'RGB' "
"or 'Lab'. Got: {}".format(coordspace))
tvals = np.linspace(0, 1, 256)
path_vals = np.array([(1-t) * pt1 + t * pt2 for t in tvals])
cmap_vals = np.array([lab2rgb(pt) for pt in path_vals])
#print np.where(cmap_vals < 0)
cmap = mcolors.ListedColormap(cmap_vals)
return cmap
|
|
e7b78b3de38faed02ed2168cd6b8f1cae2cbf575
|
src/core/migrations/0073_auto_20220630_1608.py
|
src/core/migrations/0073_auto_20220630_1608.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-06-30 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20220623_1028'),
]
operations = [
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(max_length=254, unique=True, verbose_name='Username'),
),
]
|
Add migration for new `max_length` of field `username`.
|
Add migration for new `max_length` of field `username`.
|
Python
|
agpl-3.0
|
BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway,BirkbeckCTP/janeway
|
Add migration for new `max_length` of field `username`.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-06-30 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20220623_1028'),
]
operations = [
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(max_length=254, unique=True, verbose_name='Username'),
),
]
|
<commit_before><commit_msg>Add migration for new `max_length` of field `username`.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-06-30 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20220623_1028'),
]
operations = [
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(max_length=254, unique=True, verbose_name='Username'),
),
]
|
Add migration for new `max_length` of field `username`.# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-06-30 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20220623_1028'),
]
operations = [
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(max_length=254, unique=True, verbose_name='Username'),
),
]
|
<commit_before><commit_msg>Add migration for new `max_length` of field `username`.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2022-06-30 14:08
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0072_auto_20220623_1028'),
]
operations = [
migrations.AlterField(
model_name='account',
name='username',
field=models.CharField(max_length=254, unique=True, verbose_name='Username'),
),
]
|
|
0ee87c220610b2e27330af7df61ae740a909405d
|
regparser/commands/proposal_pipeline.py
|
regparser/commands/proposal_pipeline.py
|
import click
from regparser.commands.annual_editions import annual_editions
from regparser.commands.current_version import current_version
from regparser.commands.diffs import diffs
from regparser.commands.fill_with_rules import fill_with_rules
from regparser.commands.import_notice import import_notice, parse_notice
from regparser.commands.layers import layers
from regparser.commands.notice_preamble import notice_preamble
from regparser.commands.proposal_versions import proposal_versions
from regparser.commands.sync_xml import sync_xml
from regparser.commands.versions import versions
from regparser.commands.write_to import write_to
@click.command()
@click.argument('xml_file', type=click.Path(exists=True))
@click.argument('output', envvar='EREGS_OUTPUT_DIR')
@click.option('--only-latest', is_flag=True, default=False,
help="Don't derive historyl use the latest annual edition")
@click.option('--xml-ttl', type=int, default=60*60,
help='Time to cache XML downloads, in seconds')
@click.pass_context
def proposal_pipeline(ctx, xml_file, output, only_latest, xml_ttl):
"""Full proposal parsing pipeline. Reads the XML file provided, pulls out
the preamble, parses versions of the relevant CFR parts, inserts a version
for each associated with this proposal, builds layers + diffs, and writes
them out."""
ctx.invoke(sync_xml, xml_ttl=xml_ttl)
ctx.invoke(import_notice, xml_file=xml_file)
notice_xml = parse_notice(xml_file)
cfr_pairs = [(ref.title, part)
for ref in notice_xml.cfr_refs for part in ref.parts]
ctx.invoke(notice_preamble, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
if only_latest:
ctx.invoke(current_version, cfr_title=title, cfr_part=part)
else:
ctx.invoke(versions, cfr_title=title, cfr_part=part)
ctx.invoke(annual_editions, cfr_title=title, cfr_part=part)
ctx.invoke(proposal_versions, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
ctx.invoke(fill_with_rules, cfr_title=title, cfr_part=part)
ctx.invoke(diffs, cfr_title=title, cfr_part=part)
ctx.invoke(layers)
ctx.invoke(write_to, output=output)
|
Add an analog to `pipeline` to deal with proposals
|
Add an analog to `pipeline` to deal with proposals
Some of the underlying commands aren't using dependencies properly, so there's
significant rebuilding every run. That said, it works!
|
Python
|
cc0-1.0
|
tadhg-ohiggins/regulations-parser,eregs/regulations-parser,eregs/regulations-parser,tadhg-ohiggins/regulations-parser
|
Add an analog to `pipeline` to deal with proposals
Some of the underlying commands aren't using dependencies properly, so there's
significant rebuilding every run. That said, it works!
|
import click
from regparser.commands.annual_editions import annual_editions
from regparser.commands.current_version import current_version
from regparser.commands.diffs import diffs
from regparser.commands.fill_with_rules import fill_with_rules
from regparser.commands.import_notice import import_notice, parse_notice
from regparser.commands.layers import layers
from regparser.commands.notice_preamble import notice_preamble
from regparser.commands.proposal_versions import proposal_versions
from regparser.commands.sync_xml import sync_xml
from regparser.commands.versions import versions
from regparser.commands.write_to import write_to
@click.command()
@click.argument('xml_file', type=click.Path(exists=True))
@click.argument('output', envvar='EREGS_OUTPUT_DIR')
@click.option('--only-latest', is_flag=True, default=False,
help="Don't derive historyl use the latest annual edition")
@click.option('--xml-ttl', type=int, default=60*60,
help='Time to cache XML downloads, in seconds')
@click.pass_context
def proposal_pipeline(ctx, xml_file, output, only_latest, xml_ttl):
"""Full proposal parsing pipeline. Reads the XML file provided, pulls out
the preamble, parses versions of the relevant CFR parts, inserts a version
for each associated with this proposal, builds layers + diffs, and writes
them out."""
ctx.invoke(sync_xml, xml_ttl=xml_ttl)
ctx.invoke(import_notice, xml_file=xml_file)
notice_xml = parse_notice(xml_file)
cfr_pairs = [(ref.title, part)
for ref in notice_xml.cfr_refs for part in ref.parts]
ctx.invoke(notice_preamble, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
if only_latest:
ctx.invoke(current_version, cfr_title=title, cfr_part=part)
else:
ctx.invoke(versions, cfr_title=title, cfr_part=part)
ctx.invoke(annual_editions, cfr_title=title, cfr_part=part)
ctx.invoke(proposal_versions, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
ctx.invoke(fill_with_rules, cfr_title=title, cfr_part=part)
ctx.invoke(diffs, cfr_title=title, cfr_part=part)
ctx.invoke(layers)
ctx.invoke(write_to, output=output)
|
<commit_before><commit_msg>Add an analog to `pipeline` to deal with proposals
Some of the underlying commands aren't using dependencies properly, so there's
significant rebuilding every run. That said, it works!<commit_after>
|
import click
from regparser.commands.annual_editions import annual_editions
from regparser.commands.current_version import current_version
from regparser.commands.diffs import diffs
from regparser.commands.fill_with_rules import fill_with_rules
from regparser.commands.import_notice import import_notice, parse_notice
from regparser.commands.layers import layers
from regparser.commands.notice_preamble import notice_preamble
from regparser.commands.proposal_versions import proposal_versions
from regparser.commands.sync_xml import sync_xml
from regparser.commands.versions import versions
from regparser.commands.write_to import write_to
@click.command()
@click.argument('xml_file', type=click.Path(exists=True))
@click.argument('output', envvar='EREGS_OUTPUT_DIR')
@click.option('--only-latest', is_flag=True, default=False,
help="Don't derive historyl use the latest annual edition")
@click.option('--xml-ttl', type=int, default=60*60,
help='Time to cache XML downloads, in seconds')
@click.pass_context
def proposal_pipeline(ctx, xml_file, output, only_latest, xml_ttl):
"""Full proposal parsing pipeline. Reads the XML file provided, pulls out
the preamble, parses versions of the relevant CFR parts, inserts a version
for each associated with this proposal, builds layers + diffs, and writes
them out."""
ctx.invoke(sync_xml, xml_ttl=xml_ttl)
ctx.invoke(import_notice, xml_file=xml_file)
notice_xml = parse_notice(xml_file)
cfr_pairs = [(ref.title, part)
for ref in notice_xml.cfr_refs for part in ref.parts]
ctx.invoke(notice_preamble, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
if only_latest:
ctx.invoke(current_version, cfr_title=title, cfr_part=part)
else:
ctx.invoke(versions, cfr_title=title, cfr_part=part)
ctx.invoke(annual_editions, cfr_title=title, cfr_part=part)
ctx.invoke(proposal_versions, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
ctx.invoke(fill_with_rules, cfr_title=title, cfr_part=part)
ctx.invoke(diffs, cfr_title=title, cfr_part=part)
ctx.invoke(layers)
ctx.invoke(write_to, output=output)
|
Add an analog to `pipeline` to deal with proposals
Some of the underlying commands aren't using dependencies properly, so there's
significant rebuilding every run. That said, it works!import click
from regparser.commands.annual_editions import annual_editions
from regparser.commands.current_version import current_version
from regparser.commands.diffs import diffs
from regparser.commands.fill_with_rules import fill_with_rules
from regparser.commands.import_notice import import_notice, parse_notice
from regparser.commands.layers import layers
from regparser.commands.notice_preamble import notice_preamble
from regparser.commands.proposal_versions import proposal_versions
from regparser.commands.sync_xml import sync_xml
from regparser.commands.versions import versions
from regparser.commands.write_to import write_to
@click.command()
@click.argument('xml_file', type=click.Path(exists=True))
@click.argument('output', envvar='EREGS_OUTPUT_DIR')
@click.option('--only-latest', is_flag=True, default=False,
help="Don't derive historyl use the latest annual edition")
@click.option('--xml-ttl', type=int, default=60*60,
help='Time to cache XML downloads, in seconds')
@click.pass_context
def proposal_pipeline(ctx, xml_file, output, only_latest, xml_ttl):
"""Full proposal parsing pipeline. Reads the XML file provided, pulls out
the preamble, parses versions of the relevant CFR parts, inserts a version
for each associated with this proposal, builds layers + diffs, and writes
them out."""
ctx.invoke(sync_xml, xml_ttl=xml_ttl)
ctx.invoke(import_notice, xml_file=xml_file)
notice_xml = parse_notice(xml_file)
cfr_pairs = [(ref.title, part)
for ref in notice_xml.cfr_refs for part in ref.parts]
ctx.invoke(notice_preamble, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
if only_latest:
ctx.invoke(current_version, cfr_title=title, cfr_part=part)
else:
ctx.invoke(versions, cfr_title=title, cfr_part=part)
ctx.invoke(annual_editions, cfr_title=title, cfr_part=part)
ctx.invoke(proposal_versions, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
ctx.invoke(fill_with_rules, cfr_title=title, cfr_part=part)
ctx.invoke(diffs, cfr_title=title, cfr_part=part)
ctx.invoke(layers)
ctx.invoke(write_to, output=output)
|
<commit_before><commit_msg>Add an analog to `pipeline` to deal with proposals
Some of the underlying commands aren't using dependencies properly, so there's
significant rebuilding every run. That said, it works!<commit_after>import click
from regparser.commands.annual_editions import annual_editions
from regparser.commands.current_version import current_version
from regparser.commands.diffs import diffs
from regparser.commands.fill_with_rules import fill_with_rules
from regparser.commands.import_notice import import_notice, parse_notice
from regparser.commands.layers import layers
from regparser.commands.notice_preamble import notice_preamble
from regparser.commands.proposal_versions import proposal_versions
from regparser.commands.sync_xml import sync_xml
from regparser.commands.versions import versions
from regparser.commands.write_to import write_to
@click.command()
@click.argument('xml_file', type=click.Path(exists=True))
@click.argument('output', envvar='EREGS_OUTPUT_DIR')
@click.option('--only-latest', is_flag=True, default=False,
help="Don't derive historyl use the latest annual edition")
@click.option('--xml-ttl', type=int, default=60*60,
help='Time to cache XML downloads, in seconds')
@click.pass_context
def proposal_pipeline(ctx, xml_file, output, only_latest, xml_ttl):
"""Full proposal parsing pipeline. Reads the XML file provided, pulls out
the preamble, parses versions of the relevant CFR parts, inserts a version
for each associated with this proposal, builds layers + diffs, and writes
them out."""
ctx.invoke(sync_xml, xml_ttl=xml_ttl)
ctx.invoke(import_notice, xml_file=xml_file)
notice_xml = parse_notice(xml_file)
cfr_pairs = [(ref.title, part)
for ref in notice_xml.cfr_refs for part in ref.parts]
ctx.invoke(notice_preamble, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
if only_latest:
ctx.invoke(current_version, cfr_title=title, cfr_part=part)
else:
ctx.invoke(versions, cfr_title=title, cfr_part=part)
ctx.invoke(annual_editions, cfr_title=title, cfr_part=part)
ctx.invoke(proposal_versions, doc_number=notice_xml.version_id)
for title, part in cfr_pairs:
ctx.invoke(fill_with_rules, cfr_title=title, cfr_part=part)
ctx.invoke(diffs, cfr_title=title, cfr_part=part)
ctx.invoke(layers)
ctx.invoke(write_to, output=output)
|
|
9ec49fe0d699e19d9a325b8e73bff4d7a75b8de4
|
studies/migrations/0037_response_date_created.py
|
studies/migrations/0037_response_date_created.py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 22:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('studies', '0036_add_scheduled_jobs'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
Add migration to add date_created field to response.
|
Add migration to add date_created field to response.
|
Python
|
apache-2.0
|
CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api,CenterForOpenScience/lookit-api
|
Add migration to add date_created field to response.
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 22:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('studies', '0036_add_scheduled_jobs'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration to add date_created field to response.<commit_after>
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 22:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('studies', '0036_add_scheduled_jobs'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
Add migration to add date_created field to response.# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 22:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('studies', '0036_add_scheduled_jobs'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|
<commit_before><commit_msg>Add migration to add date_created field to response.<commit_after># -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-11-07 22:28
from __future__ import unicode_literals
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('studies', '0036_add_scheduled_jobs'),
]
operations = [
migrations.AddField(
model_name='response',
name='date_created',
field=models.DateTimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
|